repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
keflavich/scikit-image | doc/examples/plot_adapt_rgb.py | 4 | 4109 | """
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
return filters.sobel(image)
"""
We can use these functions as we would normally use them, but now they work
with both gray-scale and color images. Let's plot the results with a color
image:
"""
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt
image = data.astronaut()
fig = plt.figure(figsize=(14, 7))
ax_each = fig.add_subplot(121)
ax_hsv = fig.add_subplot(122)
# We use 1 - sobel_each(image); this inversion assumes the filtered image is
# normalized to the range [0, 1].
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")
# We use 1 - sobel_hsv(image); this inversion assumes the filtered image is
# normalized to the range [0, 1].
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
"""
.. image:: PLOT2RST.current_figure
Notice that the result for the value-filtered image preserves the color of the
original image, but the channel-filtered image combines the channels in a more
surprising way.
In other common cases, such as smoothing, the channel-filtered image will
produce a better result than the value-filtered image.
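As a rough sketch of that point (an editorial example, assuming your version of
scikit-image provides ``filters.gaussian``), the same decorator can adapt a
smoothing filter in both ways; the results can be displayed exactly like the
Sobel images above:
"""
@adapt_rgb(each_channel)
def smooth_each(image, sigma=3):
    # Smooth each RGB channel independently.
    return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma=3):
    # Smooth only the (V)alue channel of the HSV representation.
    return filters.gaussian(image, sigma)
"""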
You can also create your own handler functions for ``adapt_rgb``. To do so,
just create a function with the following signature::
def handler(image_filter, image, *args, **kwargs):
# Manipulate RGB image here...
image = image_filter(image, *args, **kwargs)
# Manipulate filtered image here...
return image
Note that ``adapt_rgb`` handlers are written for filters where the image is the
first argument.
As a very simple example, we can just convert any RGB image to grayscale and
then return the filtered result:
"""
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
gray_image = rgb2gray(image)
return image_filter(gray_image, *args, **kwargs)
"""
It's important to create a signature that uses ``*args`` and ``**kwargs`` to
pass arguments along to the filter so that the decorated function is allowed to
have any number of positional and keyword arguments.
Finally, we can use this handler with ``adapt_rgb`` just as before:
"""
@adapt_rgb(as_gray)
def sobel_gray(image):
return filters.sobel(image)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
# We use 1 - sobel_gray(image); this inversion assumes the filtered image is
# normalized to the range [0, 1].
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")
plt.show()
"""
.. image:: PLOT2RST.current_figure
.. note::
A very simple check of the array shape is used for detecting RGB images, so
``adapt_rgb`` is not recommended for functions that support 3D volumes or
color images in non-RGB spaces.
"""
| bsd-3-clause |
trungnt13/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
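# For reference (an editorial addition, not part of the original example), the
# hyper-parameters selected by the grid search can be inspected directly:
print(estimator.best_params_)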
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
stefan-balke/librosa | librosa/core/dtw.py | 1 | 11796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Sequence Alignment with Dynamic Time Warping."""
import numpy as np
from scipy.spatial.distance import cdist
from ..util.decorators import optional_jit
from ..util.exceptions import ParameterError
__all__ = ['dtw', 'fill_off_diagonal']
def fill_off_diagonal(x, radius, value=0):
"""Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
When ``x.shape[0] != x.shape[1]``, the radius will be
expanded so that ``x[-1, -1] = 1`` always.
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``.
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
"""
nx, ny = x.shape
# Calculate the radius in indices, rather than proportion
radius = np.round(radius * np.min(x.shape))
nx, ny = x.shape
offset = np.abs((x.shape[0] - x.shape[1]))
if nx < ny:
idx_u = np.triu_indices_from(x, k=radius + offset)
idx_l = np.tril_indices_from(x, k=-radius)
else:
idx_u = np.triu_indices_from(x, k=radius)
idx_l = np.tril_indices_from(x, k=-radius - offset)
# modify input matrix
x[idx_u] = value
x[idx_l] = value
def dtw(X=None, Y=None, C=None, metric='euclidean', step_sizes_sigma=None,
weights_add=None, weights_mul=None, subseq=False, backtrack=True,
global_constraints=False, band_rad=0.25):
'''Dynamic time warping (DTW).
This function performs a DTW and path backtracking on two sequences.
We follow the nomenclature and algorithmic approach as described in [1].
.. [1] Meinard Mueller
Fundamentals of Music Processing — Audio, Analysis, Algorithms, Applications
Springer Verlag, ISBN: 978-3-319-21944-8, 2015.
Parameters
----------
X : np.ndarray [shape=(K, N)]
audio feature matrix (e.g., chroma features)
Y : np.ndarray [shape=(K, M)]
audio feature matrix (e.g., chroma features)
C : np.ndarray [shape=(N, M)]
Precomputed distance matrix. If supplied, X and Y must not be supplied and
``metric`` will be ignored.
metric : str
Identifier for the cost-function as documented
in `scipy.spatial.cdist()`
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
subseq : binary
Enable subsequence DTW, e.g., for retrieval tasks.
backtrack : binary
Enable backtracking in accumulated cost matrix.
global_constraints : binary
Applies global constraints to the cost matrix ``C`` (Sakoe-Chiba band).
band_rad : float
The Sakoe-Chiba band radius (1/2 of the width) will be
``int(radius*min(C.shape))``.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
wp : np.ndarray [shape=(N,2)]
Warping path with index pairs.
Each row of the array contains an index pair (n, m).
Only returned when ``backtrack`` is True.
Raises
------
ParameterError
If you are doing diagonal matching and Y is shorter than X, or if an
incompatible combination of X, Y, and C is supplied.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=10, duration=15)
>>> X = librosa.feature.chroma_cens(y=y, sr=sr)
>>> noise = np.random.rand(X.shape[0], 200)
>>> Y = np.concatenate((noise, noise, X, noise), axis=1)
>>> D, wp = librosa.dtw(X, Y, subseq=True)
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(D, x_axis='frames', y_axis='frames')
>>> plt.title('Database excerpt')
>>> plt.plot(wp[:, 1], wp[:, 0], label='Optimal path', color='y')
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> plt.plot(D[-1, :] / wp.shape[0])
>>> plt.xlim([0, Y.shape[1]])
>>> plt.ylim([0, 2])
>>> plt.title('Matching cost function')
>>> plt.tight_layout()
'''
# Default Parameters
if step_sizes_sigma is None:
step_sizes_sigma = np.array([[1, 1], [0, 1], [1, 0]])
if weights_add is None:
weights_add = np.zeros(len(step_sizes_sigma))
if weights_mul is None:
weights_mul = np.ones(len(step_sizes_sigma))
if len(step_sizes_sigma) != len(weights_add):
raise ParameterError('len(weights_add) must be equal to len(step_sizes_sigma)')
if len(step_sizes_sigma) != len(weights_mul):
raise ParameterError('len(weights_mul) must be equal to len(step_sizes_sigma)')
if C is None and (X is None or Y is None):
raise ParameterError('If C is not supplied, both X and Y must be supplied')
if C is not None and (X is not None or Y is not None):
raise ParameterError('If C is supplied, both X and Y must not be supplied')
# calculate pair-wise distances, unless already supplied.
if C is None:
# take care of dimensions
X = np.atleast_2d(X)
Y = np.atleast_2d(Y)
C = cdist(X.T, Y.T, metric=metric)
C = np.atleast_2d(C)
# if diagonal matching, Y has to be longer than X
# (X simply cannot be contained in Y)
if np.array_equal(step_sizes_sigma, np.array([[1, 1]])) and (C.shape[0] > C.shape[1]):
raise ParameterError('For diagonal matching: Y.shape[1] >= X.shape[1] '
'(C.shape[1] >= C.shape[0])')
max_0 = step_sizes_sigma[:, 0].max()
max_1 = step_sizes_sigma[:, 1].max()
if global_constraints:
# Apply global constraints to the cost matrix
fill_off_diagonal(C, band_rad, value=np.inf)
# initialize whole matrix with infinity values
D = np.ones(C.shape + np.array([max_0, max_1])) * np.inf
# set starting point to C[0, 0]
D[max_0, max_1] = C[0, 0]
if subseq:
D[max_0, max_1:] = C[0, :]
D_steps = np.empty(D.shape, dtype=np.int)
# calculate accumulated cost matrix
D, D_steps = calc_accu_cost(C, D, D_steps,
step_sizes_sigma,
weights_mul, weights_add,
max_0, max_1)
# delete infinity rows and columns
D = D[max_0:, max_1:]
D_steps = D_steps[max_0:, max_1:]
if backtrack:
if subseq:
# search for global minimum in last row of D-matrix
wp_end_idx = np.argmin(D[-1, :]) + 1
wp = backtracking(D_steps[:, :wp_end_idx], step_sizes_sigma)
else:
# perform warping path backtracking
wp = backtracking(D_steps, step_sizes_sigma)
return D, np.asarray(wp, dtype=int)
else:
return D
@optional_jit(nopython=True)
def calc_accu_cost(C, D, D_steps, step_sizes_sigma,
weights_mul, weights_add, max_0, max_1):
'''Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
'''
for cur_n in range(max_0, D.shape[0]):
for cur_m in range(max_1, D.shape[1]):
# accumulate costs
for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
weights_add, weights_mul):
cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
cur_m - step_sizes_sigma[cur_step_idx, 1]]
cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
cur_C += cur_w_add
cur_cost = cur_D + cur_C
# check if cur_cost is smaller than the one stored in D
if cur_cost < D[cur_n, cur_m]:
D[cur_n, cur_m] = cur_cost
# save step-index
D_steps[cur_n, cur_m] = cur_step_idx
return D, D_steps
@optional_jit(nopython=True)
def backtracking(D_steps, step_sizes_sigma):
'''Backtrack optimal warping path.
Uses the saved step sizes from the cost accumulation
step to backtrack the index pairs for an optimal
warping path.
Parameters
----------
D_steps : np.ndarray [shape=(N, M)]
Saved indices of the used steps used in the calculation of D.
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
Returns
-------
wp : list [shape=(N,)]
Warping path with index pairs.
Each list entry contains an index pair
(n,m) as a tuple
See Also
--------
dtw
'''
wp = []
# Set starting point D(N,M) and append it to the path
cur_idx = (D_steps.shape[0] - 1, D_steps.shape[1] - 1)
wp.append((cur_idx[0], cur_idx[1]))
# Loop backwards.
# Stop criteria:
# Setting it to (0, 0) does not work for the subsequence dtw,
# so we only ask to reach the first row of the matrix.
while cur_idx[0] > 0:
cur_step_idx = D_steps[(cur_idx[0], cur_idx[1])]
# save tuple with minimal acc. cost in path
cur_idx = (cur_idx[0] - step_sizes_sigma[cur_step_idx][0],
cur_idx[1] - step_sizes_sigma[cur_step_idx][1])
# append to warping path
wp.append((cur_idx[0], cur_idx[1]))
return wp
| isc |
jjx02230808/project0223 | sklearn/neural_network/tests/test_mlp.py | 46 | 18585 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# Licence: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false)
np.seterr(all='warn')
ACTIVATION_TYPES = ["logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.classes_ = [0, 1]
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.label_binarizer_.y_type_ = 'binary'
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
assert_almost_equal(mlp.decision_function(X), 1.043, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
algorithm='l-bfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problem.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuses past solution.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected.
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error.
X = [[3, 2]]
y = [0]
clf = MLPClassifier(algorithm='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(algorithm='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(
algorithm='sgd').partial_fit,
X, y,
classes=[2])
# l-bfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise ValueError.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(algorithm='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multi():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(random_state=1, hidden_layer_sizes=15)
mlp.fit(X, y)
pred1 = mlp.decision_function(X)
mlp.fit(X_sparse, y)
pred2 = mlp.decision_function(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the algorithm to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
learning_rate='adaptive', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
| bsd-3-clause |
pianomania/scikit-learn | examples/applications/plot_out_of_core_classification.py | 51 | 13651 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, which will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are being run by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
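# Editorial illustration (not part of the original example): hashing is
# stateless, so batches with entirely different vocabularies are still mapped
# into the same 2 ** 18 dimensional feature space.
_demo_batch_a = vectorizer.transform([u'oil prices rise'])
_demo_batch_b = vectorizer.transform([u'merger talks continue'])
assert _demo_batch_a.shape[1] == _demo_batch_b.shape[1] == 2 ** 18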
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: `size` is counted before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
cdawei/shogun | examples/undocumented/python_modular/graphical/metric_lmnn_objective.py | 26 | 2350 | #!/usr/bin/env python
def load_compressed_features(fname_features):
try:
import gzip
import numpy
except ImportError:
print 'Error importing gzip and/or numpy modules. Please verify their installation.'
import sys
sys.exit(0)
# load features from a gz compressed file
file_features = gzip.GzipFile(fname_features)
str_features = file_features.read()
file_features.close()
strlist_features = str_features.split('\n')[:-1]  # drop the empty last element produced by the trailing newline
# the number of lines in the file is the number of vectors
num_vectors = len(strlist_features)
# the number of elements in a line is the number of features
num_features = len(strlist_features[0].split())
# memory pre-allocation for the feature matrix
fm = numpy.zeros((num_vectors, num_features))
# fill in feature matrix
for i in xrange(num_vectors):
try:
fm[i,:] = map(numpy.float64, strlist_features[i].split())
except ValueError:
print 'All the vectors must have the same number of features.'
import sys
sys.exit(0)
return fm
def metric_lmnn_statistics(k=3, fname_features='../../data/fm_train_multiclass_digits.dat.gz', fname_labels='../../data/label_train_multiclass_digits.dat'):
try:
from modshogun import LMNN, CSVFile, RealFeatures, MulticlassLabels, MSG_DEBUG
import matplotlib.pyplot as pyplot
except ImportError:
print 'Error importing modshogun or other required modules. Please verify their installation.'
return
features = RealFeatures(load_compressed_features(fname_features).T)
labels = MulticlassLabels(CSVFile(fname_labels))
# print 'number of examples = %d' % features.get_num_vectors()
# print 'number of features = %d' % features.get_num_features()
assert(features.get_num_vectors() == labels.get_num_labels())
# train LMNN
lmnn = LMNN(features, labels, k)
lmnn.set_correction(100)
# lmnn.io.set_loglevel(MSG_DEBUG)
print 'Training LMNN, this will take about two minutes...'
lmnn.train()
print 'Training done!'
# plot objective obtained during training
statistics = lmnn.get_statistics()
pyplot.plot(statistics.obj.get())
pyplot.grid(True)
pyplot.xlabel('Iterations')
pyplot.ylabel('LMNN objective')
pyplot.title('LMNN objective during training for the multiclass digits data set')
pyplot.show()
if __name__=='__main__':
print('LMNN objective')
metric_lmnn_statistics()
| gpl-3.0 |
jjx02230808/project0223 | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
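# Editorial sanity check (not part of the original example), assuming soft
# voting reduces to a weighted average of the individual classifiers'
# probabilities with the same [1, 1, 5] weights:
manual_average = np.average(probas[:3], axis=0, weights=[1, 1, 5])
print('max deviation from manual weighted average: %.2e'
      % np.abs(manual_average - probas[3]).max())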
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/pyplots/whats_new_98_4_fancy.py | 1 | 2644 | """
======================
Whats New 0.98.4 Fancy
======================
"""
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
figheight = 8
fig = plt.figure(1, figsize=(9, figheight), dpi=80)
fontsize = 0.4 * fig.dpi
def make_boxstyles(ax):
styles = mpatch.BoxStyle.get_styles()
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
ax.text(0.5, (float(len(styles)) - 0.5 - i)/len(styles), stylename,
ha="center",
size=fontsize,
transform=ax.transAxes,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
def make_arrowstyles(ax):
styles = mpatch.ArrowStyle.get_styles()
ax.set_xlim(0, 4)
ax.set_ylim(0, figheight)
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
y = (float(len(styles)) -0.25 - i) # /figheight
p = mpatch.Circle((3.2, y), 0.2, fc="w")
ax.add_patch(p)
ax.annotate(stylename, (3.2, y),
(2., y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="w", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax1 = fig.add_subplot(121, frameon=False, xticks=[], yticks=[])
make_boxstyles(ax1)
ax2 = fig.add_subplot(122, frameon=False, xticks=[], yticks=[])
make_arrowstyles(ax2)
pltshow(plt)
| mit |
DGrady/pandas | pandas/util/testing.py | 1 | 92648 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
StringIO, PY3
)
from pandas.core.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel, Panel4D)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
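Examples
--------
A minimal illustration (editorial example; it assumes a writable temporary
location for the pickle file):
>>> s = pd.Series([1, 2, 3])
>>> s_roundtrip = round_trip_pickle(s)
>>> s.equals(s_roundtrip)
True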
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to a file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to a file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
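# Illustrative usage sketch: exercise the pathlib round-trip helper with CSV
# I/O. The helper name `_example_round_trip_pathlib` and the frame contents
# are arbitrary.
def _example_round_trip_pathlib():
    df = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    result = round_trip_pathlib(df.to_csv,
                                lambda p: pd.read_csv(p, index_col=0))
    assert_frame_equal(result, df)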
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
check_exact : bool, default False
Whether to compare numbers exactly.
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both left and right are the same type.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
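# Illustrative usage sketch: assert_almost_equal dispatches on type, routing
# Index/Series/DataFrame inputs to the dedicated assert_*_equal helpers and
# everything else to the cython comparator. The sample values are arbitrary.
def _example_assert_almost_equal():
    assert_almost_equal(Series([0.1, 0.2]), Series([0.1, 0.2]))
    assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))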
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
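# Illustrative usage sketch: the rands* helpers produce random string scalars
# and arrays of a requested width. The helper name is arbitrary.
def _example_random_strings():
    s = rands(5)                          # one 5-character string
    arr = rands_array(nchars=3, size=4)   # object array of four 3-char strings
    assert len(s) == 5 and arr.shape == (4,)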
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
if is_platform_32bit():
import pytest
pytest.skip("skipping for 32 bit")
def _skip_if_no_mpl():
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg", warn=False)
def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
mpl.use("Agg", warn=False)
def _skip_if_no_scipy():
import pytest
pytest.importorskip("scipy.stats")
pytest.importorskip("scipy.sparse")
pytest.importorskip("scipy.interpolate")
def _check_if_lzma():
try:
return compat.import_lzma()
except ImportError:
return False
def _skip_if_no_lzma():
import pytest
return _check_if_lzma() or pytest.skip('need backports.lzma to run')
def _skip_if_no_xarray():
import pytest
xarray = pytest.importorskip("xarray")
v = xarray.__version__
if v < LooseVersion('0.7.0'):
import pytest
pytest.skip("xarray version is too low: {version}".format(version=v))
def _skip_if_windows_python_3():
if PY3 and is_platform_windows():
import pytest
pytest.skip("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import pytest
pytest.skip("Running on Windows")
def _skip_if_no_pathlib():
try:
from pathlib import Path # noqa
except ImportError:
import pytest
pytest.skip("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath # noqa
except ImportError:
import pytest
pytest.skip("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and its >= 1.0
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum', 'prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
_NUMEXPR_INSTALLED)
if engine == 'numexpr':
if not _USE_NUMEXPR:
import pytest
pytest.skip("numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
import pytest
pytest.skip("Specific locale is set {lang}".format(lang=lang))
def _skip_if_not_us_locale():
import locale
lang, _ = locale.getlocale()
if lang != 'en_US':
import pytest
pytest.skip("Specific locale is set {lang}".format(lang=lang))
def _skip_if_no_mock():
try:
import mock # noqa
except ImportError:
try:
from unittest import mock # noqa
except ImportError:
import pytest
raise pytest.skip("mock is not installed")
def _skip_if_no_ipython():
import pytest
pytest.importorskip("IPython")
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('{prefix}.*'.format(prefix=prefix)) \
.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
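# Illustrative usage sketch: temporarily switch to an available en_US locale,
# if the platform provides one. The helper name is arbitrary.
def _example_set_locale():
    available = get_locales(prefix='en_US') or []
    if available:
        with set_locale(available[0]):
            pass  # code here runs under the temporary locale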
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : list of str
A list of locale strings.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
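# Illustrative usage sketch: with the default exact='equiv', a RangeIndex and
# an Int64Index holding the same values compare equal. The helper name is
# arbitrary.
def _example_assert_index_equal():
    assert_index_equal(makeRangeIndex(k=3), makeIntIndex(k=3))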
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, type '
'encountered {name!r}').format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
).format(name=objs.__class__.__name__)
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that the integer dtype of the codes is the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
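# Illustrative usage sketch: two Categoricals with the same values but
# differently ordered categories only compare equal when category order is
# ignored. The sample values are arbitrary.
def _example_assert_categorical_equal():
    left = Categorical(['a', 'b', 'a'], categories=['a', 'b'])
    right = Categorical(['a', 'b', 'a'], categories=['b', 'a'])
    assert_categorical_equal(left, right, check_category_order=False)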
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
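# Illustrative usage sketch: check_same distinguishes views (shared memory)
# from copies. The sample array is arbitrary.
def _example_assert_numpy_array_equal():
    arr = np.array([1, 2, 3])
    assert_numpy_array_equal(arr, arr[:], check_same='same')
    assert_numpy_array_equal(arr, arr.copy(), check_same='copy')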
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
Whether to compare numbers exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
l = pd.IntervalIndex(left)
r = pd.IntervalIndex(right)
assert_index_equal(l, r, obj='{obj}.index'.format(obj=obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
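# Illustrative usage sketch: identical values with different dtypes pass once
# check_dtype=False is given. The sample Series are arbitrary.
def _example_assert_series_equal():
    left = Series([1, 2, 3], name='x')
    right = Series([1.0, 2.0, 3.0], name='x')
    assert_series_equal(left, right, check_dtype=False)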
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
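# Illustrative usage sketch: the same content with columns in a different
# order only passes with check_like=True, which reindexes before comparing.
# The sample frame is arbitrary.
def _example_assert_frame_equal():
    left = DataFrame({'a': [1, 2], 'b': [3, 4]})
    right = left[['b', 'a']]
    assert_frame_equal(left, right, check_like=True)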
def assert_panelnd_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
assert_func : function for comparing data
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
def assert_sp_list_equal(left, right):
assert isinstance(left, pd.SparseList)
assert isinstance(right, pd.SparseList)
assert_sp_array_equal(left.to_array(), right.to_array())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name)
def makeIntervalIndex(k=10, name=None):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None):
return RangeIndex(0, k, 1, name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
with warnings.catch_warnings(record=True):
d = dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper))
return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'
.format(idx_type=idx_type))
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
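# Illustrative usage sketch: a two-level MultiIndex with default names and
# the first level's labels each repeated twice. The parameter values are
# arbitrary.
def _example_make_custom_index():
    mi = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert mi.nlevels == 2 and len(mi) == 4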
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" creates a datetime index.
"td" creates a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
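# Illustrative usage sketch: with density=.9 roughly 10% of the float values
# are replaced with NaN; a fixed random_state keeps the pattern reproducible.
# The parameter values are arbitrary.
def _example_make_missing_dataframe():
    df = makeMissingDataframe(density=0.9, random_state=42)
    assert df.isnull().values.any()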
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checker when running tests.
#
# Copied this from nipy/nipype
# Copyright of respective developers, License: BSD-3
def skip_if_no_package(pkg_name, min_version=None, max_version=None,
app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
If the package check fails, the test is automatically skipped.
Parameters
----------
pkg_name : string
Name of the required package.
min_version : string, optional
Minimal version number for required package.
max_version : string, optional
Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
--------
skip_if_no_package('numpy', '1.3')
"""
import pytest
if app:
msg = '{app} requires {pkg_name}'.format(app=app, pkg_name=pkg_name)
else:
msg = 'module requires {pkg_name}'.format(pkg_name=pkg_name)
if min_version:
msg += ' with version >= {min_version}'.format(min_version=min_version)
if max_version:
msg += ' with version < {max_version}'.format(max_version=max_version)
try:
mod = __import__(pkg_name)
except ImportError:
mod = None
try:
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for {pkg_name}'
.format(pkg_name=pkg_name))
if min_version and checker(have_version) < checker(min_version):
pytest.skip(msg)
if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
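# Illustrative usage sketch: a decorator written against optional_args can be
# applied bare or with keyword arguments. The name `_example_tag` and the
# `label` keyword are arbitrary.
@optional_args
def _example_tag(f, label='default'):
    @wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    wrapper.label = label
    return wrapper
# Usage: `@_example_tag` gives label 'default'; `@_example_tag(label='slow')`
# gives label 'slow'.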
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
    Label a test as requiring a network connection and, if an error is
    encountered, re-raise it only if a network connection is actually
    available; otherwise the test is skipped.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
    url : str
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
    _skip_on_messages : iterable of string
        Any exception e for which one of the strings is
        a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network will fail (rather than be skipped) if it is
    possible to make a network connection to the check URL (defaults to
    google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
        ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
if not errno and hasattr(errno, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error {error}".format(error=e))
            try:
                e_str = traceback.format_exc()
            except Exception:
                e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
skip("Skipping test because exception "
"message is known and error {error}".format(error=e))
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error {error}".format(e))
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`. This is a port of the `assertRaisesRegexp`
function from unittest in Python 2.7.
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{name} not raised.".format(name=exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
msg = '"{pat}" does not match "{val}"'.format(
pat=self.regexp.pattern, val=val)
e = AssertionError(msg)
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
    raise a warning. Defaults to ``Warning``, the base class of all warnings
    (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
                except AttributeError:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {actual} != "
"{caller}. Warning message: {message}"
).format(actual=actual_warning.filename,
caller=caller.filename,
message=actual_warning.message)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
msg = "Did not see expected warning of class {name!r}.".format(
name=expected_warning.__name__)
assert saw_warning, msg
assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}."
).format(extra=extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
    to the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
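# Illustrative usage sketch: temporarily force (or disable) the numexpr-backed
# evaluation path regardless of operand size, restoring the previous settings
# on exit (``df1`` and ``df2`` are hypothetical DataFrames):
#
#   with use_numexpr(True, min_elements=0):
#       result = df1 + df2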
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
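# Illustrative usage sketch (``build_frames`` is a made-up test body):
#
#   @test_parallel(num_threads=4)
#   def build_frames():
#       pd.DataFrame(np.random.randn(100, 3))
#
#   build_frames()  # runs the body in 4 threads; return values are discarded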
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
    >>> C.attribute # the value is reset when the context manager exits
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
if is_platform_windows():
import pytest
pytest.skip("timezone setting not supported on windows")
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
            except KeyError:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
| bsd-3-clause |
burakbayramli/dersblog | algs/algs_175_ocr/tf/test5.py | 2 | 1496 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
w = 128; h = 64
# dummy input: a batch of 10 all-zero "images" of shape (w, h, 1)
dataset = np.zeros((10,w,h,1))
pool_size = 1
num_filters = 16
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
inputs = tf.placeholder(tf.float32, [None, w, h, 1])
W_conv1 = weight_variable([3, 3, 1, num_filters])
b_conv1 = bias_variable([num_filters])
h_conv1 = tf.nn.relu(conv2d(inputs, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([3, 3, num_filters, num_filters])
b_conv2 = bias_variable([num_filters])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_flat = tf.reshape(h_pool2, [-1, 32, 256])
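# Shape check: two SAME-padded convolutions plus two 2x2 max-pools turn the
# (128, 64, 1) input into (32, 16, 16) feature maps, i.e. 32*16*16 = 8192
# values per image; [-1, 32, 256] keeps the first spatial axis (length 32)
# and folds the remaining 16*16 = 256 values into a feature vector.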
#W_fc1 = weight_variable([256, 32])
W_fc1 = tf.zeros([256, 32])
b_fc1 = bias_variable([32])
print(h_pool2_flat)
print(W_fc1)
h_fc1 = tf.matmul(h_pool2_flat, W_fc1)
#h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(h_pool2_flat, feed_dict={inputs: dataset})
    print('output', output.shape)
| gpl-3.0 |
anntzer/scikit-learn | examples/mixture/plot_gmm_sin.py | 19 | 6100 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example demonstrates the behavior of Gaussian mixture models fit on data
that was not sampled from a mixture of Gaussian random variables. The dataset
is formed by 100 points loosely spaced following a noisy sine curve. There is
therefore no ground truth value for the number of Gaussian components.
The first model is a classical Gaussian Mixture Model with 10 components fit
with the Expectation-Maximization algorithm.
The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process
prior fit with variational inference. The low value of the concentration prior
makes the model favor a lower number of active components. This models
"decides" to focus its modeling power on the big picture of the structure of
the dataset: groups of points with alternating directions modeled by
non-diagonal covariance matrices. Those alternating directions roughly capture
the alternating nature of the original sine signal.
The third model is also a Bayesian Gaussian mixture model with a Dirichlet
process prior but this time the value of the concentration prior is higher
giving the model more liberty to model the fine-grained structure of the data.
The result is a mixture with a larger number of active components that is
similar to the first model where we arbitrarily decided to fix the number of
components to 10.
Which model is best is a matter of subjective judgment: do we want to favor
models that only capture the big picture, summarizing and explaining most of
the structure of the data while ignoring the details, or do we prefer models
that closely follow the high-density regions of the signal?
The last two panels show how we can sample from the last two models. The
resulting samples distributions do not look exactly like the original data
distribution. The difference primarily stems from the approximation error we
made by using a model that assumes that the data was generated by a finite
number of Gaussian components instead of a continuous noisy sine curve.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y, means, covariances, index, title):
splot = plt.subplot(5, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
def plot_samples(X, Y, n_components, index, title):
plt.subplot(5, 1, 4 + index)
for i, color in zip(range(n_components), color_iter):
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
# Parameters
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4. * np.pi / n_samples
for i in range(X.shape[0]):
x = i * step - 6.
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3. * (np.sin(x) + np.random.normal(0, .2))
plt.figure(figsize=(10, 10))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.2, wspace=.05,
left=.03, right=.97)
# Fit a Gaussian mixture with EM using ten components
gmm = mixture.GaussianMixture(n_components=10, covariance_type='full',
max_iter=100).fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Expectation-maximization')
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e-2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="random", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=0.01$.")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 0,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=0.01$ sampled with $2000$ samples.")
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e+2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="kmeans", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=100$")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 1,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=100$ sampled with $2000$ samples.")
plt.show()
| bsd-3-clause |
EnSpec/SpecDAL | specdal/operators/proximal_join.py | 1 | 2806 | import pandas as pd
import numpy as np
import warnings
import logging as logging
logging.basicConfig(level=logging.WARNING,
format="%(levelname)s:%(name)s:%(message)s\n")
def get_column_types(df):
'''
Returns a tuple (wvl_cols, meta_cols), given a dataframe.
Notes
-----
    Wavelength columns are defined as columns whose names are numerical
    (i.e. parse as a decimal number). Everything else is considered a
    metadata column.
'''
isdigit = df.columns.map(str).str.replace('.', '').str.isdigit()
wvl_cols = df.columns[isdigit].sort_values()
meta_cols = df.columns.difference(wvl_cols)
return wvl_cols, meta_cols
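# Illustrative sketch: for a frame with columns ['350', '350.5', 'gps_time_tgt'],
# the numeric names '350' and '350.5' come back (sorted) as wvl_cols, while
# 'gps_time_tgt' is the only entry in meta_cols.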
def proximal_join(base_df, rover_df, on='gps_time_tgt', direction='nearest'):
'''
Perform proximal join and return a new dataframe.
    Parameters
    ----------
base_df: pandas.DataFrame
DataFrame of reference measurements
rover_df: pandas.DataFrame
DataFrame of target measurements
Returns
-------
proximal: pandas.DataFrame object
        Proximally processed dataset (rover_df / base_df).
Notes
-----
As a side-effect, the rover dataframe is sorted by the key
Both base_df and rover_df must have the column specified by on.
This column must be the same type in base and rover.
'''
# remove spectra with missing join metadata from the dataset
bad_rover = rover_df[on].isnull()
bad_base = base_df[on].isnull()
if bad_rover.any():
logging.warning(
"Removing {} spectra with missing {} key from dataset."
.format(bad_rover.sum(),on))
if bad_base.any():
logging.warning(
"Removing {} reference spectra with missing {} key from dataset."
.format(bad_base.sum(),on))
rover_df = rover_df[~bad_rover]
base_df = base_df[~bad_base]
rover_wvl_cols, rover_meta_cols = get_column_types(rover_df)
base_wvl_cols, base_meta_cols = get_column_types(base_df)
# join the (sorted) keys
joined = pd.merge_asof(rover_df[on].sort_values().reset_index(),
base_df[on].sort_values().reset_index(),
on=on,
direction=direction,
suffixes=('_rover', '_base'))
rover_df = rover_df.loc[joined['index_rover']]
base_df = base_df.loc[joined['index_base']]
base_df.index = rover_df.index
metadata = pd.merge(rover_df[rover_meta_cols], base_df[base_meta_cols],
left_index=True, right_index=True,
suffixes=('_rover', '_base'))
proximal = rover_df[rover_wvl_cols]/base_df[base_wvl_cols]
proximal = pd.merge(metadata, proximal, left_index=True,
right_index=True) # retrieve metadata
return proximal
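# Illustrative usage sketch (column names and values are made up):
#
#   base = pd.DataFrame({'gps_time_tgt': [0.0, 10.0], '350': [2.0, 4.0]})
#   rover = pd.DataFrame({'gps_time_tgt': [1.0, 9.0], '350': [1.0, 2.0]})
#   result = proximal_join(base, rover)
#
# Each rover spectrum is divided by the reference spectrum with the nearest
# 'gps_time_tgt', so result['350'] would hold 1.0/2.0 and 2.0/4.0 here, and
# the matched keys are kept as 'gps_time_tgt_rover' / 'gps_time_tgt_base'.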
| mit |
TomAugspurger/pandas | pandas/tests/arithmetic/test_numeric.py | 1 | 46627 | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for numeric dtypes
from collections import abc
from decimal import Decimal
from itertools import combinations
import operator
from typing import Any, List
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex
import pandas._testing as tm
from pandas.core import ops
def adjust_negative_zero(zero, expected):
"""
Helper to adjust the expected result if we are dividing by -0.0
as opposed to 0.0
"""
if np.signbit(np.array(zero)).any():
# All entries in the `zero` fixture should be either
# all-negative or no-negative.
assert np.signbit(np.array(zero)).all()
expected *= -1
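        # Flipping the sign matches the IEEE-754 result: 1 / -0.0 is -inf
        # while 1 / 0.0 is +inf, so every expected infinity changes sign.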
return expected
# TODO: remove this kludge once mypy stops giving false positives here
# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex]
# See GH#29725
ser_or_index: List[Any] = [pd.Series, pd.Index]
lefts: List[Any] = [pd.RangeIndex(10, 40, 10)]
lefts.extend(
[
cls([10, 20, 30], dtype=dtype)
for dtype in ["i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f2", "f4", "f8"]
for cls in ser_or_index
]
)
# ------------------------------------------------------------------
# Comparisons
class TestNumericComparisons:
def test_operator_series_comparison_zerorank(self):
# GH#13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
tm.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
tm.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
tm.assert_series_equal(result, expected)
def test_df_numeric_cmp_dt64_raises(self):
# GH#8932, GH#22163
ts = pd.Timestamp.now()
df = pd.DataFrame({"x": range(5)})
msg = (
"'[<>]' not supported between instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
df > ts
with pytest.raises(TypeError, match=msg):
df < ts
with pytest.raises(TypeError, match=msg):
ts < df
with pytest.raises(TypeError, match=msg):
ts > df
assert not (df == ts).any().any()
assert (df != ts).all().all()
def test_compare_invalid(self):
# GH#8058
# ops testing
a = pd.Series(np.random.randn(5), name=0)
b = pd.Series(np.random.randn(5))
b.name = pd.Timestamp("2000-01-01")
tm.assert_series_equal(a / b, 1 / (b / a))
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar
class TestNumericArraylikeArithmeticWithDatetimeLike:
    # TODO: also check name retention
@pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series])
@pytest.mark.parametrize(
"left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype),
)
def test_mul_td64arr(self, left, box_cls):
# GH#22390
right = np.array([1, 2, 3], dtype="m8[s]")
right = box_cls(right)
expected = pd.TimedeltaIndex(["10s", "40s", "90s"])
if isinstance(left, pd.Series) or box_cls is pd.Series:
expected = pd.Series(expected)
result = left * right
tm.assert_equal(result, expected)
result = right * left
tm.assert_equal(result, expected)
    # TODO: also check name retention
@pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series])
@pytest.mark.parametrize(
"left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype),
)
def test_div_td64arr(self, left, box_cls):
# GH#22390
right = np.array([10, 40, 90], dtype="m8[s]")
right = box_cls(right)
expected = pd.TimedeltaIndex(["1s", "2s", "3s"])
if isinstance(left, pd.Series) or box_cls is pd.Series:
expected = pd.Series(expected)
result = right / left
tm.assert_equal(result, expected)
result = right // left
tm.assert_equal(result, expected)
msg = "Cannot divide"
with pytest.raises(TypeError, match=msg):
left / right
with pytest.raises(TypeError, match=msg):
left // right
# TODO: de-duplicate with test_numeric_arr_mul_tdscalar
def test_ops_series(self):
        # regression test for GH#8813
td = Timedelta("1 day")
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(["1 day", "2 days"]))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
# TODO: also test non-nanosecond timedelta64 and Tick objects;
# see test_numeric_arr_rdiv_tdscalar for note on these failing
@pytest.mark.parametrize(
"scalar_td",
[
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta(),
],
ids=lambda x: type(x).__name__,
)
def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
# GH#19333
index = numeric_idx
expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(5)])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize(
"scalar_td",
[
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta(),
],
ids=lambda x: type(x).__name__,
)
def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box):
arr = np.arange(2 * 10 ** 4).astype(np.int64)
obj = tm.box_expected(arr, box, transpose=False)
expected = arr.view("timedelta64[D]").astype("timedelta64[ns]")
expected = tm.box_expected(expected, box, transpose=False)
result = obj * scalar_td
tm.assert_equal(result, expected)
result = scalar_td * obj
tm.assert_equal(result, expected)
def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
index = numeric_idx[1:3]
expected = TimedeltaIndex(["3 Days", "36 Hours"])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = three_days / index
tm.assert_equal(result, expected)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
index / three_days
@pytest.mark.parametrize(
"other",
[
pd.Timedelta(hours=31),
pd.Timedelta(hours=31).to_pytimedelta(),
pd.Timedelta(hours=31).to_timedelta64(),
pd.Timedelta(hours=31).to_timedelta64().astype("m8[h]"),
np.timedelta64("NaT"),
np.timedelta64("NaT", "D"),
pd.offsets.Minute(3),
pd.offsets.Second(0),
],
)
def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
left = tm.box_expected(numeric_idx, box)
msg = (
"unsupported operand type|"
"Addition/subtraction of integers and integer-arrays|"
"Instead of adding/subtracting|"
"cannot use operands with types dtype"
)
with pytest.raises(TypeError, match=msg):
left + other
with pytest.raises(TypeError, match=msg):
other + left
with pytest.raises(TypeError, match=msg):
left - other
with pytest.raises(TypeError, match=msg):
other - left
@pytest.mark.parametrize(
"other",
[
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now(tz="UTC").to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
pd.NaT,
],
)
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
def test_add_sub_datetimelike_invalid(self, numeric_idx, other, box):
# GH#28080 numeric+datetime64 should raise; Timestamp raises
# NullFrequencyError instead of TypeError so is excluded.
left = tm.box_expected(numeric_idx, box)
msg = (
"unsupported operand type|"
"Cannot (add|subtract) NaT (to|from) ndarray|"
"Addition/subtraction of integers and integer-arrays"
)
with pytest.raises(TypeError, match=msg):
left + other
with pytest.raises(TypeError, match=msg):
other + left
with pytest.raises(TypeError, match=msg):
left - other
with pytest.raises(TypeError, match=msg):
other - left
# ------------------------------------------------------------------
# Arithmetic
class TestDivisionByZero:
def test_div_zero(self, zero, numeric_idx):
idx = numeric_idx
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)
# We only adjust for Index, because Series does not yet apply
# the adjustment correctly.
expected2 = adjust_negative_zero(zero, expected)
result = idx / zero
tm.assert_index_equal(result, expected2)
ser_compat = Series(idx).astype("i8") / np.array(zero).astype("i8")
tm.assert_series_equal(ser_compat, Series(expected))
def test_floordiv_zero(self, zero, numeric_idx):
idx = numeric_idx
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)
# We only adjust for Index, because Series does not yet apply
# the adjustment correctly.
expected2 = adjust_negative_zero(zero, expected)
result = idx // zero
tm.assert_index_equal(result, expected2)
ser_compat = Series(idx).astype("i8") // np.array(zero).astype("i8")
tm.assert_series_equal(ser_compat, Series(expected))
def test_mod_zero(self, zero, numeric_idx):
idx = numeric_idx
expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64)
result = idx % zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype("i8") % np.array(zero).astype("i8")
tm.assert_series_equal(ser_compat, Series(result))
def test_divmod_zero(self, zero, numeric_idx):
idx = numeric_idx
exleft = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64)
exright = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64)
exleft = adjust_negative_zero(zero, exleft)
result = divmod(idx, zero)
tm.assert_index_equal(result[0], exleft)
tm.assert_index_equal(result[1], exright)
@pytest.mark.parametrize("op", [operator.truediv, operator.floordiv])
def test_div_negative_zero(self, zero, numeric_idx, op):
# Check that -1 / -0.0 returns np.inf, not -np.inf
if isinstance(numeric_idx, pd.UInt64Index):
return
idx = numeric_idx - 3
expected = pd.Index(
[-np.inf, -np.inf, -np.inf, np.nan, np.inf], dtype=np.float64
)
expected = adjust_negative_zero(zero, expected)
result = op(idx, zero)
tm.assert_index_equal(result, expected)
# ------------------------------------------------------------------
@pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64])
def test_ser_div_ser(self, dtype1, any_real_dtype):
# no longer do integer div for any ops, but deal with the 0's
dtype2 = any_real_dtype
first = Series([3, 4, 5, 8], name="first").astype(dtype1)
second = Series([0, 0, 0, 3], name="second").astype(dtype2)
with np.errstate(all="ignore"):
expected = Series(
first.values.astype(np.float64) / second.values,
dtype="float64",
name=None,
)
expected.iloc[0:3] = np.inf
result = first / second
tm.assert_series_equal(result, expected)
assert not result.equals(second / first)
@pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64])
def test_ser_divmod_zero(self, dtype1, any_real_dtype):
# GH#26987
dtype2 = any_real_dtype
left = pd.Series([1, 1]).astype(dtype1)
right = pd.Series([0, 2]).astype(dtype2)
# GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed
# to numpy which sets to np.nan; patch `expected[0]` below
expected = left // right, left % right
expected = list(expected)
expected[0] = expected[0].astype(np.float64)
expected[0][0] = np.inf
result = divmod(left, right)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
# rdivmod case
result = divmod(left.values, right)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
def test_ser_divmod_inf(self):
left = pd.Series([np.inf, 1.0])
right = pd.Series([np.inf, 2.0])
expected = left // right, left % right
result = divmod(left, right)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
# rdivmod case
result = divmod(left.values, right)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
def test_rdiv_zero_compat(self):
# GH#8674
zero_array = np.array([0] * 5)
data = np.random.randn(5)
expected = Series([0.0] * 5)
result = zero_array / Series(data)
tm.assert_series_equal(result, expected)
result = Series(zero_array) / data
tm.assert_series_equal(result, expected)
result = Series(zero_array) / Series(data)
tm.assert_series_equal(result, expected)
def test_div_zero_inf_signs(self):
# GH#9144, inf signing
ser = Series([-1, 0, 1], name="first")
expected = Series([-np.inf, np.nan, np.inf], name="first")
result = ser / 0
tm.assert_series_equal(result, expected)
def test_rdiv_zero(self):
# GH#9144
ser = Series([-1, 0, 1], name="first")
expected = Series([0.0, np.nan, 0.0], name="first")
result = 0 / ser
tm.assert_series_equal(result, expected)
def test_floordiv_div(self):
# GH#9144
ser = Series([-1, 0, 1], name="first")
result = ser // 0
expected = Series([-np.inf, np.nan, np.inf], name="first")
tm.assert_series_equal(result, expected)
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({"first": first, "second": second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({"first": first, "second": second})
with np.errstate(all="ignore"):
arr = df.values.astype("float") / df.values
result = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all="ignore"):
arr = df.values.astype("float64") / 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype="float64")
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({"first": first, "second": second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype="float64")
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({"first": first, "second": second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all="ignore"):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns, dtype="float64")
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all="ignore"):
arr = df.values.astype("float64") % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestMultiplicationDivision:
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# for non-timestamp/timedelta/period dtypes
@pytest.mark.parametrize(
"box",
[
pytest.param(
pd.Index,
marks=pytest.mark.xfail(
reason="Index.__div__ always raises", raises=TypeError
),
),
pd.Series,
pd.DataFrame,
],
ids=lambda x: x.__name__,
)
def test_divide_decimal(self, box):
# resolves issue GH#9787
ser = Series([Decimal(10)])
expected = Series([Decimal(5)])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = ser / Decimal(2)
tm.assert_equal(result, expected)
result = ser // Decimal(2)
tm.assert_equal(result, expected)
def test_div_equiv_binop(self):
# Test Series.div as well as Series.__div__
# float/integer issue
# GH#7785
first = Series([1, 0], name="first")
second = Series([-0.01, -0.02], name="second")
expected = Series([-0.01, -np.inf])
result = second.div(first)
tm.assert_series_equal(result, expected, check_names=False)
result = second / first
tm.assert_series_equal(result, expected)
def test_div_int(self, numeric_idx):
idx = numeric_idx
result = idx / 1
expected = idx.astype("float64")
tm.assert_index_equal(result, expected)
result = idx / 2
expected = Index(idx.values / 2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv])
def test_mul_int_identity(self, op, numeric_idx, box_with_array):
idx = numeric_idx
idx = tm.box_expected(idx, box_with_array)
result = op(idx, 1)
tm.assert_equal(result, idx)
def test_mul_int_array(self, numeric_idx):
idx = numeric_idx
didx = idx * idx
result = idx * np.array(5, dtype="int64")
tm.assert_index_equal(result, idx * 5)
arr_dtype = "uint64" if isinstance(idx, pd.UInt64Index) else "int64"
result = idx * np.arange(5, dtype=arr_dtype)
tm.assert_index_equal(result, didx)
def test_mul_int_series(self, numeric_idx):
idx = numeric_idx
didx = idx * idx
arr_dtype = "uint64" if isinstance(idx, pd.UInt64Index) else "int64"
result = idx * Series(np.arange(5, dtype=arr_dtype))
tm.assert_series_equal(result, Series(didx))
def test_mul_float_series(self, numeric_idx):
idx = numeric_idx
rng5 = np.arange(5, dtype="float64")
result = idx * Series(rng5 + 0.1)
expected = Series(rng5 * (rng5 + 0.1))
tm.assert_series_equal(result, expected)
def test_mul_index(self, numeric_idx):
# in general not true for RangeIndex
idx = numeric_idx
if not isinstance(idx, pd.RangeIndex):
result = idx * idx
tm.assert_index_equal(result, idx ** 2)
def test_mul_datelike_raises(self, numeric_idx):
idx = numeric_idx
msg = "cannot perform __rmul__ with this index type"
with pytest.raises(TypeError, match=msg):
idx * pd.date_range("20130101", periods=5)
def test_mul_size_mismatch_raises(self, numeric_idx):
idx = numeric_idx
msg = "operands could not be broadcast together"
with pytest.raises(ValueError, match=msg):
idx * idx[0:3]
with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
@pytest.mark.parametrize("op", [operator.pow, ops.rpow])
def test_pow_float(self, op, numeric_idx, box_with_array):
# test power calculations both ways, GH#14973
box = box_with_array
idx = numeric_idx
expected = pd.Float64Index(op(idx.values, 2.0))
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = op(idx, 2.0)
tm.assert_equal(result, expected)
def test_modulo(self, numeric_idx, box_with_array):
# GH#9244
box = box_with_array
idx = numeric_idx
expected = Index(idx.values % 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx % 2
tm.assert_equal(result, expected)
def test_divmod_scalar(self, numeric_idx):
idx = numeric_idx
result = divmod(idx, 2)
with np.errstate(all="ignore"):
div, mod = divmod(idx.values, 2)
expected = Index(div), Index(mod)
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
def test_divmod_ndarray(self, numeric_idx):
idx = numeric_idx
other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2
result = divmod(idx, other)
with np.errstate(all="ignore"):
div, mod = divmod(idx.values, other)
expected = Index(div), Index(mod)
for r, e in zip(result, expected):
tm.assert_index_equal(r, e)
def test_divmod_series(self, numeric_idx):
idx = numeric_idx
other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2
result = divmod(idx, Series(other))
with np.errstate(all="ignore"):
div, mod = divmod(idx.values, other)
expected = Series(div), Series(mod)
for r, e in zip(result, expected):
tm.assert_series_equal(r, e)
@pytest.mark.parametrize("other", [np.nan, 7, -23, 2.718, -3.14, np.inf])
def test_ops_np_scalar(self, other):
vals = np.random.randn(5, 3)
f = lambda x: pd.DataFrame(
x, index=list("ABCDE"), columns=["jim", "joe", "jolie"]
)
df = f(vals)
tm.assert_frame_equal(df / np.array(other), f(vals / other))
tm.assert_frame_equal(np.array(other) * df, f(vals * other))
tm.assert_frame_equal(df + np.array(other), f(vals + other))
tm.assert_frame_equal(np.array(other) - df, f(other - vals))
# TODO: This came from series.test.test_operators, needs cleanup
def test_operators_frame(self):
# rpow does not work with DataFrame
ts = tm.makeTimeSeries()
ts.name = "ts"
df = pd.DataFrame({"A": ts})
tm.assert_series_equal(ts + ts, ts + df["A"], check_names=False)
tm.assert_series_equal(ts ** ts, ts ** df["A"], check_names=False)
tm.assert_series_equal(ts < ts, ts < df["A"], check_names=False)
tm.assert_series_equal(ts / ts, ts / df["A"], check_names=False)
# TODO: this came from tests.series.test_analytics, needs cleanup and
# de-duplication with test_modulo above
def test_modulo2(self):
with np.errstate(all="ignore"):
# GH#3590, modulo as ints
p = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
result = p["first"] % p["second"]
expected = Series(p["first"].values % p["second"].values, dtype="float64")
expected.iloc[0:3] = np.nan
tm.assert_series_equal(result, expected)
result = p["first"] % 0
expected = Series(np.nan, index=p.index, name="first")
tm.assert_series_equal(result, expected)
p = p.astype("float64")
result = p["first"] % p["second"]
expected = Series(p["first"].values % p["second"].values)
tm.assert_series_equal(result, expected)
p = p.astype("float64")
result = p["first"] % p["second"]
result2 = p["second"] % p["first"]
assert not result.equals(result2)
def test_modulo_zero_int(self):
# GH#9144
with np.errstate(all="ignore"):
s = Series([0, 1])
result = s % 0
expected = Series([np.nan, np.nan])
tm.assert_series_equal(result, expected)
result = 0 % s
expected = Series([np.nan, 0.0])
tm.assert_series_equal(result, expected)
class TestAdditionSubtraction:
# __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__
# for non-timestamp/timedelta/period dtypes
# TODO: This came from series.test.test_operators, needs cleanup
def test_arith_ops_df_compat(self):
# GH#1134
s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x")
exp = pd.Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD"))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x")
exp = pd.Series([3, 4, 5, np.nan], index=list("ABCD"), name="x")
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD"))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
# TODO: This came from series.test.test_operators, needs cleanup
def test_series_frame_radd_bug(self):
# GH#353
vals = pd.Series(tm.rands_array(5, 10))
result = "foo_" + vals
expected = vals.map(lambda x: "foo_" + x)
tm.assert_series_equal(result, expected)
frame = pd.DataFrame({"vals": vals})
result = "foo_" + frame
expected = pd.DataFrame({"vals": vals.map(lambda x: "foo_" + x)})
tm.assert_frame_equal(result, expected)
ts = tm.makeTimeSeries()
ts.name = "ts"
# really raise this time
now = pd.Timestamp.now().to_pydatetime()
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
now + ts
with pytest.raises(TypeError, match=msg):
ts + now
# TODO: This came from series.test.test_operators, needs cleanup
def test_datetime64_with_index(self):
# arithmetic integer ops with an index
ser = pd.Series(np.random.randn(5))
expected = ser - ser.index.to_series()
result = ser - ser.index
tm.assert_series_equal(result, expected)
# GH#4629
# arithmetic datetime64 ops with an index
ser = pd.Series(
pd.date_range("20130101", periods=5),
index=pd.date_range("20130101", periods=5),
)
expected = ser - ser.index.to_series()
result = ser - ser.index
tm.assert_series_equal(result, expected)
msg = "cannot subtract period"
with pytest.raises(TypeError, match=msg):
# GH#18850
result = ser - ser.index.to_period()
df = pd.DataFrame(
np.random.randn(5, 2), index=pd.date_range("20130101", periods=5)
)
df["date"] = pd.Timestamp("20130102")
df["expected"] = df["date"] - df.index.to_series()
df["result"] = df["date"] - df.index
tm.assert_series_equal(df["result"], df["expected"], check_names=False)
# TODO: taken from tests.frame.test_operators, needs cleanup
def test_frame_operators(self, float_frame):
frame = float_frame
frame2 = pd.DataFrame(float_frame, columns=["D", "C", "B", "A"])
garbage = np.random.random(4)
colSeries = pd.Series(garbage, index=np.array(frame.columns))
idSum = frame + frame
seriesSum = frame + colSeries
for col, series in idSum.items():
for idx, val in series.items():
origVal = frame[col][idx] * 2
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
for col, series in seriesSum.items():
for idx, val in series.items():
origVal = frame[col][idx] + colSeries[col]
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
added = frame2 + frame2
expected = frame2 * 2
tm.assert_frame_equal(added, expected)
df = pd.DataFrame({"a": ["a", None, "b"]})
tm.assert_frame_equal(df + df, pd.DataFrame({"a": ["aa", np.nan, "bb"]}))
# Test for issue #10181
for dtype in ("float", "int64"):
frames = [
pd.DataFrame(dtype=dtype),
pd.DataFrame(columns=["A"], dtype=dtype),
pd.DataFrame(index=[0], dtype=dtype),
]
for df in frames:
assert (df + df).equals(df)
tm.assert_frame_equal(df + df, df)
# TODO: taken from tests.series.test_operators; needs cleanup
def test_series_operators(self):
def _check_op(series, other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
if isinstance(other, Series) and not other.index.equals(series.index):
python.index = python.index._with_freq(None)
tm.assert_series_equal(cython_or_numpy, python)
def check(series, other):
simple_ops = ["add", "sub", "mul", "truediv", "floordiv", "mod"]
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, ops.radd)
_check_op(series, other, ops.rsub)
_check_op(series, other, ops.rtruediv)
_check_op(series, other, ops.rfloordiv)
_check_op(series, other, ops.rmul)
_check_op(series, other, ops.rpow, pos_only=True)
_check_op(series, other, ops.rmod)
tser = tm.makeTimeSeries().rename("ts")
check(tser, tser * 2)
check(tser, tser[::2])
check(tser, 5)
def check_comparators(series, other):
_check_op(series, other, operator.gt)
_check_op(series, other, operator.ge)
_check_op(series, other, operator.eq)
_check_op(series, other, operator.lt)
_check_op(series, other, operator.le)
check_comparators(tser, 5)
check_comparators(tser, tser + 1)
# TODO: taken from tests.series.test_operators; needs cleanup
def test_divmod(self):
def check(series, other):
results = divmod(series, other)
if isinstance(other, abc.Iterable) and len(series) != len(other):
# if the lengths don't match, this is the test where we use
# `tser[::2]`. Pad every other value in `other_np` with nan.
other_np = []
for n in other:
other_np.append(n)
other_np.append(np.nan)
else:
other_np = other
other_np = np.asarray(other_np)
with np.errstate(all="ignore"):
expecteds = divmod(series.values, np.asarray(other_np))
for result, expected in zip(results, expecteds):
# check the values, name, and index separately
tm.assert_almost_equal(np.asarray(result), expected)
assert result.name == series.name
tm.assert_index_equal(result.index, series.index._with_freq(None))
tser = tm.makeTimeSeries().rename("ts")
check(tser, tser * 2)
check(tser, tser[::2])
check(tser, 5)
def test_series_divmod_zero(self):
# Check that divmod uses pandas convention for division by zero,
# which does not match numpy.
# pandas convention has
# 1/0 == np.inf
# -1/0 == -np.inf
# 1/-0.0 == -np.inf
# -1/-0.0 == np.inf
tser = tm.makeTimeSeries().rename("ts")
other = tser * 0
result = divmod(tser, other)
exp1 = pd.Series([np.inf] * len(tser), index=tser.index, name="ts")
exp2 = pd.Series([np.nan] * len(tser), index=tser.index, name="ts")
tm.assert_series_equal(result[0], exp1)
tm.assert_series_equal(result[1], exp2)
class TestUFuncCompat:
@pytest.mark.parametrize(
"holder",
[pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.RangeIndex, pd.Series],
)
def test_ufunc_compat(self, holder):
box = pd.Series if holder is pd.Series else pd.Index
if holder is pd.RangeIndex:
idx = pd.RangeIndex(0, 5)
else:
idx = holder(np.arange(5, dtype="int64"))
result = np.sin(idx)
expected = box(np.sin(np.arange(5, dtype="int64")))
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.Series]
)
def test_ufunc_coercions(self, holder):
idx = holder([1, 2, 3, 4, 5], name="x")
box = pd.Series if holder is pd.Series else pd.Index
result = np.sqrt(idx)
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = np.divide(idx, 2.0)
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index([0.5, 1.0, 1.5, 2.0, 2.5], name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
# _evaluate_numeric_binop
result = idx + 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index([3.0, 4.0, 5.0, 6.0, 7.0], name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx - 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index([-1.0, 0.0, 1.0, 2.0, 3.0], name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx * 1.0
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index([1.0, 2.0, 3.0, 4.0, 5.0], name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx / 2.0
assert result.dtype == "f8" and isinstance(result, box)
exp = pd.Float64Index([0.5, 1.0, 1.5, 2.0, 2.5], name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
@pytest.mark.parametrize(
"holder", [pd.Int64Index, pd.UInt64Index, pd.Float64Index, pd.Series]
)
def test_ufunc_multiple_return_values(self, holder):
obj = holder([1, 2, 3], name="x")
box = pd.Series if holder is pd.Series else pd.Index
result = np.modf(obj)
assert isinstance(result, tuple)
exp1 = pd.Float64Index([0.0, 0.0, 0.0], name="x")
exp2 = pd.Float64Index([1.0, 2.0, 3.0], name="x")
tm.assert_equal(result[0], tm.box_expected(exp1, box))
tm.assert_equal(result[1], tm.box_expected(exp2, box))
def test_ufunc_at(self):
s = pd.Series([0, 1, 2], index=[1, 2, 3], name="x")
np.add.at(s, [0, 2], 10)
expected = pd.Series([10, 1, 12], index=[1, 2, 3], name="x")
tm.assert_series_equal(s, expected)
class TestObjectDtypeEquivalence:
# Tests that arithmetic operations match operations executed elementwise
@pytest.mark.parametrize("dtype", [None, object])
def test_numarr_with_dtype_add_nan(self, dtype, box_with_array):
box = box_with_array
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = np.nan + ser
tm.assert_equal(result, expected)
result = ser + np.nan
tm.assert_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_numarr_with_dtype_add_int(self, dtype, box_with_array):
box = box_with_array
ser = pd.Series([1, 2, 3], dtype=dtype)
expected = pd.Series([2, 3, 4], dtype=dtype)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = 1 + ser
tm.assert_equal(result, expected)
result = ser + 1
tm.assert_equal(result, expected)
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"op",
[operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv],
)
def test_operators_reverse_object(self, op):
# GH#56
arr = pd.Series(np.random.randn(10), index=np.arange(10), dtype=object)
result = op(1.0, arr)
expected = op(1.0, arr.astype(float))
tm.assert_series_equal(result.astype(float), expected)
class TestNumericArithmeticUnsorted:
# Tests in this class have been moved from type-specific test modules
# but not yet sorted, parametrized, and de-duplicated
def check_binop(self, ops, scalars, idxs):
for op in ops:
for a, b in combinations(idxs, 2):
result = op(a, b)
expected = op(pd.Int64Index(a), pd.Int64Index(b))
tm.assert_index_equal(result, expected)
for idx in idxs:
for scalar in scalars:
result = op(idx, scalar)
expected = op(pd.Int64Index(idx), scalar)
tm.assert_index_equal(result, expected)
def test_binops(self):
ops = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
]
scalars = [-1, 1, 2]
idxs = [
pd.RangeIndex(0, 10, 1),
pd.RangeIndex(0, 20, 2),
pd.RangeIndex(-10, 10, 2),
pd.RangeIndex(5, -5, -1),
]
self.check_binop(ops, scalars, idxs)
def test_binops_pow(self):
# numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
ops = [pow]
scalars = [1, 2]
idxs = [pd.RangeIndex(0, 10, 1), pd.RangeIndex(0, 20, 2)]
self.check_binop(ops, scalars, idxs)
# TODO: mod, divmod?
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.pow,
],
)
def test_arithmetic_with_frame_or_series(self, op):
# check that we return NotImplemented when operating with Series
# or DataFrame
index = pd.RangeIndex(5)
other = pd.Series(np.random.randn(5))
expected = op(pd.Series(index), other)
result = op(index, other)
tm.assert_series_equal(result, expected)
other = pd.DataFrame(np.random.randn(2, 5))
expected = op(pd.DataFrame([index, index]), other)
result = op(index, other)
tm.assert_frame_equal(result, expected)
def test_numeric_compat2(self):
# validate that we are handling the RangeIndex overrides to numeric ops
# and returning RangeIndex where possible
idx = pd.RangeIndex(0, 10, 2)
result = idx * 2
expected = pd.RangeIndex(0, 20, 4)
tm.assert_index_equal(result, expected, exact=True)
result = idx + 2
expected = pd.RangeIndex(2, 12, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx - 2
expected = pd.RangeIndex(-2, 8, 2)
tm.assert_index_equal(result, expected, exact=True)
result = idx / 2
expected = pd.RangeIndex(0, 5, 1).astype("float64")
tm.assert_index_equal(result, expected, exact=True)
result = idx / 4
expected = pd.RangeIndex(0, 10, 2) / 4
tm.assert_index_equal(result, expected, exact=True)
result = idx // 1
expected = idx
tm.assert_index_equal(result, expected, exact=True)
# __mul__
result = idx * idx
expected = Index(idx.values * idx.values)
tm.assert_index_equal(result, expected, exact=True)
# __pow__
idx = pd.RangeIndex(0, 1000, 2)
result = idx ** 2
expected = idx._int64index ** 2
tm.assert_index_equal(Index(result.values), expected, exact=True)
# __floordiv__
cases_exact = [
(pd.RangeIndex(0, 1000, 2), 2, pd.RangeIndex(0, 500, 1)),
(pd.RangeIndex(-99, -201, -3), -3, pd.RangeIndex(33, 67, 1)),
(pd.RangeIndex(0, 1000, 1), 2, pd.RangeIndex(0, 1000, 1)._int64index // 2),
(
pd.RangeIndex(0, 100, 1),
2.0,
pd.RangeIndex(0, 100, 1)._int64index // 2.0,
),
(pd.RangeIndex(0), 50, pd.RangeIndex(0)),
(pd.RangeIndex(2, 4, 2), 3, pd.RangeIndex(0, 1, 1)),
(pd.RangeIndex(-5, -10, -6), 4, pd.RangeIndex(-2, -1, 1)),
(pd.RangeIndex(-100, -200, 3), 2, pd.RangeIndex(0)),
]
for idx, div, expected in cases_exact:
tm.assert_index_equal(idx // div, expected, exact=True)
@pytest.mark.parametrize("dtype", [np.int64, np.float64])
@pytest.mark.parametrize("delta", [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH#8142
delta = dtype(delta)
index = pd.Index([10, 11, 12], dtype=dtype)
result = index + delta
expected = pd.Index(index.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = index - delta
expected = pd.Index(index.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
def test_fill_value_inf_masking():
# GH #27464 make sure we mask 0/1 with Inf and not NaN
df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]})
other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3])
result = df.rfloordiv(other, fill_value=1)
expected = pd.DataFrame(
{"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_div_silenced():
# GH#26793
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
)
with tm.assert_produces_warning(None):
pdf1.div(pdf2, fill_value=0)
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
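# Three isotropic Gaussian blobs around the shifted centers; the tests below
# expect affinity propagation to recover exactly `n_clusters` exemplars.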
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
gclenaghan/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as boxplots.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
    runtimes = np.zeros(n_instances, dtype=float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
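# Note: atomic mode issues one `predict` call per sample, so every measured
# latency includes the full per-call overhead (input validation, Python
# dispatch); this is typically why atomic per-sample latencies are much
# larger than the per-sample bulk latencies measured below.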
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
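# Note: the bulk runtimes above are divided by the number of test instances,
# so both helpers report latencies on a per-prediction basis and can be
# compared directly.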
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
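# --- Illustrative usage sketch (not part of the original example) -----------
# The helper above can be used on its own with any fitted estimator exposing
# `predict`.  The function below is a minimal, hypothetical sketch of the call
# pattern; it reuses this module's imports (numpy, Ridge) and is defined here
# for illustration only -- nothing in the benchmark below calls it.
def _benchmark_estimator_usage_sketch(n_samples=200, n_features=10):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    n_train = int(0.75 * n_samples)
    model = Ridge().fit(X[:n_train], y[:n_train])
    atomic_runtimes, bulk_runtimes = benchmark_estimator(
        model, X[n_train:], n_bulk_repeats=5)
    # both arrays hold per-prediction latencies in seconds
    return np.median(atomic_runtimes), np.median(bulk_runtimes)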
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict; the estimator names and
        complexities it describes are used as the x-tick labels
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
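# Each entry in 'estimators' pairs an estimator instance (fitted inside
# `benchmark`) with a 'complexity_computer' callable and a 'complexity_label';
# these are only used to annotate the plot labels (number of non-zero
# coefficients, trees, or support vectors).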
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
pprett/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 7 | 47843 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42),
LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2,
multi_class='ovr', random_state=42)
]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ['sag', 'saga']:
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
max_iter=1000,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
solver=solver,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20, random_state=0)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20, random_state=0)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
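# --- Illustrative sketch (not part of the original test suite) --------------
# The least-squares trick used above -- sampling the gradient along a short
# path and regressing the samples against the offsets -- can be seen in
# isolation on a scalar toy problem: for f(x) = x ** 3 the gradient is
# 3 * x ** 2 and the second derivative is 6 * x.  The helper below is
# hypothetical and defined for illustration only; it is not used by the tests.
def _toy_second_derivative_by_least_squares(x0=0.5, e=1e-3):
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([3. * (x0 + t) ** 2 for t in d_x])  # gradient samples
    d_grad -= d_grad.mean()  # remove the constant component
    slope = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()[0]
    return slope  # close to 6 * x0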
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1,))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_multinomial_logistic_regression_string_inputs():
# Test with string labels for LogisticRegression(CV)
n_samples, n_features, n_classes = 50, 5, 3
X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
n_classes=n_classes, n_informative=3,
random_state=0)
y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
# For numerical labels, let y values be taken from set (-1, 0, 1)
y = np.array(y) - 1
# Test for string labels
lr = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr.fit(X_ref, y)
lr_cv.fit(X_ref, y)
lr_str.fit(X_ref, y_str)
lr_cv_str.fit(X_ref, y_str)
assert_array_almost_equal(lr.coef_, lr_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo'])
# The predictions should be in original labels
assert_equal(sorted(np.unique(lr_str.predict(X_ref))),
['bar', 'baz', 'foo'])
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))),
['bar', 'baz', 'foo'])
# Make sure class weights can be given with string labels
lr_cv_str = LogisticRegression(
solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0},
multi_class='multinomial').fit(X_ref, y_str)
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz'])
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e., before the classes 0 and 1 are
    # conflated) are used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10,))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
max_iter = 2000 if solver in ['sag', 'saga'] else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-5 if solver in ['sag', 'saga'] else 1e-2,
cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10,))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
saga = LogisticRegression(solver='saga', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
saga.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, sag.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, lib.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-7
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
saga = LogisticRegression(solver='saga', fit_intercept=False, tol=tol,
max_iter=10000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
saga.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, sag.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, lib.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
n_classes = len(weight)
for class_weight in (weight, 'balanced'):
X, y = make_classification(n_samples=30, n_features=3,
n_repeated=0,
n_informative=3, n_redundant=0,
n_classes=n_classes, random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_sag = LogisticRegressionCV(solver='sag', Cs=1,
fit_intercept=False,
class_weight=class_weight,
tol=1e-5, max_iter=10000,
random_state=0)
clf_saga = LogisticRegressionCV(solver='saga', Cs=1,
fit_intercept=False,
class_weight=class_weight,
tol=1e-5, max_iter=10000,
random_state=0)
clf_lbf.fit(X, y)
clf_ncg.fit(X, y)
clf_lib.fit(X, y)
clf_sag.fit(X, y)
clf_saga.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
random_state=42)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
random_state=42)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2}, random_state=42)
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True,
    # since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1", tol=1e-5, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20, random_state=0)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as the reference
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'saga', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000, tol=1e-7,
)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000, tol=1e-7,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in
    # this case we take the average of the coefs after fitting across all
    # the folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver='saga')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_l1():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20,
random_state=0)
X_noise = rng.normal(size=(n_samples, 3))
X_constant = np.ones(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
fit_intercept=False,
tol=1e-10)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20,
random_state=0)
X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
X_constant = np.zeros(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
X[X < 1] = 0
X = sparse.csr_matrix(X)
lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
fit_intercept=False,
tol=1e-10)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
# Check that solving on the sparse and dense data yield the same results
lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga_dense.fit(X.toarray(), y)
assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag', 'saga']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag', 'saga'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
def test_warm_start():
    # A 1-iteration second fit on the same data should give almost the same
    # result with warm starting, and a quite different result without it.
    # Warm starting does not work with the liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag', 'saga']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
def test_saga_vs_liblinear():
iris = load_iris()
X, y = iris.data, iris.target
X = np.concatenate([X] * 10)
y = np.concatenate([y] * 10)
X_bin = X[y <= 1]
y_bin = y[y <= 1] * 2 - 1
X_sparse, y_sparse = make_classification(n_samples=50, n_features=20,
random_state=0)
X_sparse = sparse.csr_matrix(X_sparse)
for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
for penalty in ['l1', 'l2']:
n_samples = X.shape[0]
# alpha=1e-3 is time consuming
for alpha in np.logspace(-1, 1, 3):
saga = LogisticRegression(
C=1. / (n_samples * alpha),
solver='saga',
multi_class='ovr',
max_iter=200,
fit_intercept=False,
penalty=penalty, random_state=0, tol=1e-24)
liblinear = LogisticRegression(
C=1. / (n_samples * alpha),
solver='liblinear',
multi_class='ovr',
max_iter=200,
fit_intercept=False,
penalty=penalty, random_state=0, tol=1e-24)
saga.fit(X, y)
liblinear.fit(X, y)
# Convergence for alpha=1e-3 is very slow
assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
| bsd-3-clause |
akionakamura/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/animation/old_animation/histogram_tkagg.py | 3 | 1847 | """
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig, ax = plt.subplots()
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
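# n holds the counts per bin and bins the 101 bin edges returned by np.histogram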
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
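# drive the animation with Tk's after() timer: animate() redraws and reschedules itself until 100 frames have run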
def animate():
if animate.cnt>=100:
return
animate.cnt += 1
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
fig.canvas.draw()
fig.canvas.manager.window.after(100, animate)
animate.cnt = 0
fig.canvas.manager.window.after(100, animate)
plt.show()
| unlicense |
crichardson17/starburst_atlas | SFH_comparison/data/Geneva_inst_Rot/Geneva_inst_Rot_0/fullgrid/peaks_reader.py | 1 | 5057 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
headerloc = "/Users/helen/Documents/Elon/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 3 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print("File names constructed")
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
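	#drop the header row; columns 6:8 presumably hold the grid parameters (phi, hdens) extracted below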
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("File import complete")
#To fix when hdens > 10:
#many of the grids were run with hdens up to 12, so part of the data needs to be cut off
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 6.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
# ---------------------------------------------------
#here the emission line names are read in, properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#for 4860
incident = concatenated_data[:,57]
#take the ratio of each line to the incident/reference line and store it all in the array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("data arranged")
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks_Geneva_inst_0', max_values, delimiter='\t')
| gpl-2.0 |
Irsan88/SeqTools | DataProcessing/roda/branches/irna/bin/scripts/plotHomHetRegions.py | 6 | 1692 | import sys
import matplotlib.pyplot as plt
vcfIn = open(sys.argv[1], 'r')
positions = {}
allChroms = []
lengthContigs = {}
for line in vcfIn:
if line[0] == '#':
if line[0:8] == '##contig':
line = line.rstrip().split(',')
chrom = line[0].split('=')[-1]
length = line[1].split('=')[-1]
lengthContigs[chrom] = length
continue
line = line.rstrip().split('\t')
if line[0] not in positions:
positions[line[0]] = []
allChroms.append(line[0])
alleleFreq = line[7].split(';')[1]
alleleFreq = alleleFreq.split('=')
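	# the second INFO field is expected to look like "AF=0.5"; split it into key and value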
if alleleFreq[0] != 'AF':
		print "weird!" + str(alleleFreq)
alleleFreq = alleleFreq[1]
positions[line[0]].append([line[1], alleleFreq])
#subplots = [0, 611, 612, 613, 614, 615, 616]
#subplots = [0, 511, 512, 513, 514, 515]
subplots = [0, 411, 412, 413, 414]
loop = 0
nrFig = 0
allChroms.sort(key=lambda x: x[0])
fig = plt.figure()
for chrom in allChroms:
loop = loop + 1
if loop == 5:
nrFig = nrFig + 1
plt.savefig(sys.argv[2] + "_" + str(nrFig) + '.png')
fig = plt.figure()
loop = 1
print chrom
posx = []
posy = []
for pos in positions[chrom]:
freqs = pos[1].split(',')
lengthfreqs = len(freqs)
for i in range(0,lengthfreqs):
posx.append(int(pos[0]))
posy.append(float(freqs[i]))
ax = fig.add_subplot(subplots[loop])
ax.scatter(posx, posy, s=2, facecolor='0.5', lw = 0)
ax.set_ylabel('AF ' + str(chrom))
#ax.set_xlabel('Position on ' + str(chrom))
ax.set_ylim(-0.1,1.1)
ax.set_xlim(0, int(lengthContigs[chrom]))
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(10)
nrFig = nrFig + 1
plt.savefig(sys.argv[2] + "_" + str(nrFig) + '.png')
| gpl-2.0 |
jgabriellima/mining | mining/test/test_mining_utils.py | 7 | 4972 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from datetime import date, datetime
from decimal import Decimal
from pandas import tslib, DataFrame
from mining.utils import slugfy
from mining.utils._pandas import fix_type, fix_render, df_generate
class df_slugfy_test(unittest.TestCase):
def test_generate_simples(self):
self.assertEquals(u"testamdo-slugfy", slugfy(u"Testamdo slugfy"))
def test_used_accents(self):
self.assertEquals(u"testando-se-e-slugfy",
slugfy(u"Testando sé é slugfy"))
class fix_type_test(unittest.TestCase):
def test_str_latin1(self):
self.assertEquals(fix_type("test".encode('latin1')), u"test")
self.assertEqual(type(fix_type("test".encode("latin1"))), unicode)
def test_str(self):
self.assertEquals(fix_type("test"), u"test")
self.assertEqual(type(fix_type("test")), unicode)
def test_timestamp(self):
t = tslib.Timestamp.now()
self.assertEquals(fix_type(t), t.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(type(fix_type(t)), str)
def test_datetime(self):
d = datetime.now()
self.assertEquals(fix_type(d), d.strftime("%Y-%m-%d"))
self.assertEquals(type(fix_type(d)), str)
def test_date(self):
d = date.today()
self.assertEquals(fix_type(d), d.strftime("%Y-%m-%d"))
self.assertEquals(type(fix_type(d)), str)
def test_decimal(self):
d = Decimal(10.10)
self.assertEquals(fix_type(d), float(10.1))
self.assertEquals(type(fix_type(d)), float)
class fix_render_test(unittest.TestCase):
def test_render_dict(self):
data = [{'h': 1, 'v': 'a'}, {'h': 2, 'v': 'b'}]
self.assertEquals(fix_render(data[0]), {'h': 1, 'v': u'a'})
self.assertEquals(fix_render(data[1]), {'h': 2, 'v': u'b'})
class df_generate_test(unittest.TestCase):
def setUp(self):
self.df = DataFrame([
{'date': '2014-01-01', 'int': 1, 'str': 'Angular'},
{'date': '2014-02-01', 'int': 2, 'str': 'Credit'},
{'date': '2014-03-01', 'int': 3, 'str': 'Diamon'}])
class df_generate_between_test(df_generate_test):
def test_between_date(self):
g = df_generate(self.df, "2014-01-01:2014-02-01",
"filter__date__between__date__:Y-:m-:d")
self.assertEquals(g, u"date in ['2014-01-01', '2014-01-02', "
"'2014-01-03', '2014-01-04', '2014-01-05', "
"'2014-01-06', '2014-01-07', '2014-01-08', "
"'2014-01-09', '2014-01-10', '2014-01-11', "
"'2014-01-12', '2014-01-13', '2014-01-14', "
"'2014-01-15', '2014-01-16', '2014-01-17', "
"'2014-01-18', '2014-01-19', '2014-01-20', "
"'2014-01-21', '2014-01-22', '2014-01-23', "
"'2014-01-24', '2014-01-25', '2014-01-26', "
"'2014-01-27', '2014-01-28', '2014-01-29', "
"'2014-01-30', '2014-01-31', '2014-02-01']")
class df_generate_in_test(df_generate_test):
def test_in_str(self):
g = df_generate(self.df, "1,2,3", "filter__int__in")
self.assertEquals(g, u"int in ['1', '2', '3']")
def test_in_str_text(self):
g = df_generate(self.df, "Diamond,Angular", "filter__str__in__str")
self.assertEquals(g, u"str in ['Diamond', 'Angular']")
def test_in_int(self):
g = df_generate(self.df, "1,2,3", "filter__int__in__int")
self.assertEquals(g, u"int in [1, 2, 3]")
class df_generate_notin_test(df_generate_test):
def test_notin_str(self):
g = df_generate(self.df, "1,2,3", "filter__int__notin")
self.assertEquals(g, u"['1', '2', '3'] not in int")
def test_notin_int(self):
g = df_generate(self.df, "1,2,3", "filter__int__notin__int")
self.assertEquals(g, u"[1, 2, 3] not in int")
class df_generate_is_test(df_generate_test):
def test_is(self):
g = df_generate(self.df, "2014-01-01", "filter__date")
self.assertEquals(g, u"date == '2014-01-01'")
def test_is_type_str_text(self):
g = df_generate(self.df, "Diamon", "filter__nivel__is__str")
self.assertEquals(g, u"nivel == 'Diamon'")
def test_is_type_int(self):
g = df_generate(self.df, "1", "filter__int__is__int")
self.assertEquals(g, u"int == 1")
def test_is_type_str(self):
g = df_generate(self.df, "1", "filter__int__is__str")
self.assertEquals(g, u"int == '1'")
class df_generate_gte_test(df_generate_test):
def test_gte(self):
g = df_generate(self.df, "1", "filter__int__gte")
self.assertEquals(g, u"int >= 1")
class df_generate_lte_test(df_generate_test):
def test_lte(self):
g = df_generate(self.df, "1", "filter__int__lte")
self.assertEquals(g, u"int <= 1")
| mit |
rexshihaoren/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
cjratcliff/variational-dropout | nets.py | 1 | 8268 | from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.core import Dropout
from layers import FCVarDropout, Conv2DVarDropout
from loss import sgvlb
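# sgvlb: presumably the stochastic-gradient variational lower bound used as the training objective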
from utils import get_minibatches_idx, clip
batch_size = 32
eps = 1e-8
class Net():
def fit(self,X,y,sess):
max_epochs = 20
# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=42)
for epoch in range(max_epochs):
start = time.time()
train_indices = get_minibatches_idx(len(X_train), batch_size, shuffle=True)
print("\nEpoch %d" % (epoch+1))
train_accs = []
for c,it in enumerate(train_indices):
batch_train_x = [X_train[i] for i in it]
batch_train_y = [y_train[i] for i in it]
feed_dict = {self.x: batch_train_x,
self.y: batch_train_y,
self.deterministic: False}
_,acc = sess.run([self.train_step,self.accuracy], feed_dict)
train_accs.append(acc)
#print(c,len(train_indices),acc)
print("Training accuracy: %.3f" % np.mean(train_accs))
val_pred = self.predict(X_val,sess)
y = np.argmax(y_val,axis=1)
val_acc = np.mean(np.equal(val_pred,y))
print("Val accuracy: %.3f" % val_acc)
print("Time taken: %.3fs" % (time.time() - start))
return
def predict(self,X,sess):
indices = get_minibatches_idx(len(X), batch_size, shuffle=False)
pred = []
for i in indices:
batch_x = [X[j] for j in i]
feed_dict = {self.x: batch_x,
self.deterministic: True}
pred_batch = sess.run(self.pred, feed_dict)
pred.append(pred_batch)
pred = np.concatenate(pred,axis=0)
pred = np.argmax(pred,axis=1)
pred = np.reshape(pred,(-1))
return pred
class LeNet(Net):
def __init__(self, img_size, num_channels, num_classes):
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
h = Conv2D(32, kernel_size=(3,3),
activation='relu',
input_shape=[None,img_size,img_size,num_channels])(self.x)
h = Conv2D(64, (3, 3), activation='relu')(h)
h = MaxPooling2D(pool_size=(2,2))(h)
h = Flatten()(h)
h = Dense(500, activation='relu')(h)
self.pred = Dense(num_classes, activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
loss = -tf.reduce_sum(tf.log(pred)*self.y)
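		# cross-entropy summed over the batch; predictions are clipped above to avoid log(0)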
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer()
self.train_step = optimizer.minimize(loss)
class LeNetVarDropout(Net):
def __init__(self, img_size, num_channels, num_classes):
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
h = Conv2DVarDropout(num_channels, 32, (3,3), strides=(1,1))(self.x,d)
h = Conv2DVarDropout(32, 64, (3,3), strides=(1,1))(h,d)
h = MaxPooling2D(pool_size=(2,2))(h)
h = Flatten()(h)
if num_channels == 1:
#h = FCVarDropout(9216,500)(h,d)
h = Dense(500)(h)
elif num_channels == 3:
h = FCVarDropout(12544,500)(h,d)
else:
raise NotImplementedError
#self.pred = FCVarDropout(500,num_classes,tf.nn.softmax)(h,d)
self.pred = Dense(num_classes,activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
W = tf.get_collection('W')
log_sigma2 = tf.get_collection('log_sigma2')
loss = sgvlb(pred, self.y, W, log_sigma2, batch_size, rw=1)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer()
self.train_step = optimizer.minimize(loss)
class VGG(Net):
def __init__(self, img_size, num_channels, num_classes, dropout_prob=0.0):
# Based on https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
phase = tf.logical_not(d)
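		# batch norm updates its running statistics only when the network is not run deterministically (i.e. during training)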
def conv_bn(h, num_filters, phase):
h = Conv2D(num_filters, (3,3), padding='same')(h) # Linear
h = tf.contrib.layers.batch_norm(h, center=True, scale=False, is_training=phase)
return tf.nn.relu(h)
# Block 1
h = conv_bn(self.x,64,phase)
h = conv_bn(h,64,phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 2
h = conv_bn(h,128,phase)
h = conv_bn(h,128,phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 3
h = conv_bn(h,256,phase)
h = conv_bn(h,256,phase)
h = conv_bn(h,256,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 4
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 5
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
h = Flatten()(h)
self.pred = Dense(num_classes, activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
loss = -tf.reduce_sum(tf.log(pred)*self.y)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer(0.001)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step
self.train_step = optimizer.minimize(loss)
class VGGVarDropout(Net):
def __init__(self, img_size, num_channels, num_classes):
# Based on https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
phase = tf.logical_not(d)
def conv_bn(h, filters_in, filters_out, d, phase):
h = Conv2DVarDropout(filters_in, filters_out, (3,3), padding='SAME', nonlinearity=tf.identity)(h,d) # Linear
h = tf.contrib.layers.batch_norm(h, center=True, scale=False, is_training=phase)
return tf.nn.relu(h)
# Block 1
h = conv_bn(self.x, num_channels, 64, d, phase)
h = conv_bn(h, 64, 64, d, phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 2
h = conv_bn(h, 64, 128, d, phase)
h = conv_bn(h, 128, 128, d, phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 3
h = conv_bn(h, 128, 256, d, phase)
h = conv_bn(h, 256, 256, d, phase)
h = conv_bn(h, 256, 256, d, phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 4
h = conv_bn(h, 256, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = MaxPooling2D((2, 2), strides=(2, 2))(h)
# Block 5
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = MaxPooling2D((2, 2), strides=(2, 2))(h)
h = Flatten()(h)
self.pred = FCVarDropout(512, num_classes, tf.nn.softmax)(h,d)
pred = tf.clip_by_value(self.pred,eps,1-eps)
W = tf.get_collection('W')
log_sigma2 = tf.get_collection('log_sigma2')
loss = sgvlb(pred, self.y, W, log_sigma2, batch_size)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer(0.0001)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step
self.train_step = optimizer.minimize(loss)
| gpl-3.0 |
necromuralist/student_intervention | student_intervention/preparing_data.py | 1 | 3505 |
# python standard library
import pickle
# third party
import pandas
from sklearn.cross_validation import train_test_split
# this code
from common import (student_data, feature_map, TrainTestData,
train_test_path, RANDOM_STATE)
# Extract feature (X) and target (y) columns
feature_columns = list(student_data.columns[:-1]) # all columns but last are features
target_column = student_data.columns[-1] # last column is the target/label
X_all = student_data[feature_columns] # feature values for all students
y_all = student_data[target_column] # corresponding targets/labels
assert len(y_all) == 395, "Expected: 395 Actual: {0}".format(len(y_all))
for column_name in sorted(feature_columns):
column = X_all[column_name]
dtype = column.dtype
examples = (', '.join(sorted(column.unique())) if dtype == object else
', '.join([str(item) for item in sorted(column.unique())]) if len(column.unique()) < 10
else
"{0} ... {1}".format(column.min(), column.max()))
print(' {0};{1};{2}'.format(column_name,
feature_map[column_name],
examples))
def preprocess_features(X):
"""
Converts categorical data to numeric
:param:
- `X`: dataframe of data
:return: data with yes/no changed to 1/0, others changed to dummies
"""
outX = pandas.DataFrame(index=X.index)
# Check each column
for col, col_data in X.iteritems():
# If data type is non-numeric, try to replace all yes/no values with 1/0
if col_data.dtype == object:
col_data = col_data.replace(['yes', 'no'], [1, 0])
# Note: This should change the data type for yes/no columns to int
# If still non-numeric, convert to one or more dummy variables
if col_data.dtype == object:
col_data = pandas.get_dummies(col_data, prefix=col) # e.g. 'school' => 'school_GP', 'school_MS'
outX = outX.join(col_data) # collect column(s) in output dataframe
return outX
X_all = preprocess_features(X_all)
y_all = y_all.replace(['yes', 'no'], [1, 0])
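# encode the target the same way: 'yes' -> 1 and 'no' -> 0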
assert len(y_all) == 395, "Expected: 395 Actual: {0}".format(len(y_all))
original_columns = len(feature_columns)
with_dummies = len(X_all.columns)
print(" * Original Feature Columns: {0}".format(original_columns))
print(" * With Dummies: {0}".format(with_dummies))
print("\nWith dummy variables there are now {0} more columns in the feature data.".format(with_dummies - original_columns))
# First, decide how many training vs test samples you want
num_all = student_data.shape[0] # same as len(student_data)
assert num_all == 395, "Expected: 395 Actual: {0}".format(num_all)
num_train = 300 # about 75% of the data
num_test = num_all - num_train
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
test_size=num_test,
train_size=num_train,
random_state=RANDOM_STATE)
assert len(y_train) == 300
assert len(y_test) == 95
data = TrainTestData(X_train = X_train,
X_test = X_test,
y_train = y_train,
y_test = y_test)
with open(train_test_path, 'wb') as pickler:
pickle.dump(data, pickler)
print(" Training Instances,{0}".format(X_train.shape[0]))
print(" Test Instances,{0}".format(X_test.shape[0])) | mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/_cm.py | 4 | 93997 | """
Nothing here but dictionaries for generating LinearSegmentedColormaps,
and a dictionary of these dictionaries.
Documentation for each is in pyplot.colormaps()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
_binary_data = {
'red': ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue': ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),
(0.746032, 0.652778, 0.652778),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.365079, 0.444444, 0.444444),
(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),
(0.809524, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),
(1.0, 0.4975, 0.4975))}
_flag_data = {
'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
'green': lambda x: np.sin(x * 31.5 * np.pi),
'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
}
_prism_data = {
'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
}
def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
"""Return custom data dictionary of (r,g,b) conversion functions, which
can be used with :func:`register_cmap`, for the cubehelix color scheme.
Unlike most other color schemes cubehelix was designed by D.A. Green to
be monotonically increasing in terms of perceived brightness.
Also, when printed on a black and white postscript printer, the scheme
results in a greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b values produced
can be visualised as a squashed helix around the diagonal in the
r,g,b color cube.
For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
between 0 and 1, the color is the corresponding grey value at that
fraction along the black to white diagonal (x,x,x) plus a color
element. This color element is calculated in a plane of constant
perceived intensity and controlled by the following parameters.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
gamma gamma factor to emphasise either low intensity values
(gamma < 1), or high intensity values (gamma > 1);
defaults to 1.0.
s the start color; defaults to 0.5 (i.e. purple).
r the number of r,g,b rotations in color that are made
from the start to the end of the color scheme; defaults
to -1.5 (i.e. -> B -> G -> R -> B).
h the hue parameter which controls how saturated the
colors are. If this parameter is zero then the color
scheme is purely a greyscale; defaults to 1.0.
========= =======================================================
"""
def get_color_function(p0, p1):
def color(x):
# Apply gamma factor to emphasise low or high intensity values
xg = x ** gamma
# Calculate amplitude and angle of deviation from the black
# to white diagonal in the plane of constant
# perceived intensity.
a = h * xg * (1 - xg) / 2
phi = 2 * np.pi * (s / 3 + r * x)
return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return color
return {
'red': get_color_function(-0.14861, 1.78277),
'green': get_color_function(-0.29227, -0.90649),
'blue': get_color_function(1.97294, 0.0),
}
_cubehelix_data = cubehelix()
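# A minimal usage sketch (assuming matplotlib's LinearSegmentedColormap / register_cmap API;
# the names below are only illustrative):
#
#     from matplotlib.colors import LinearSegmentedColormap
#     from matplotlib.cm import register_cmap
#     custom_helix = LinearSegmentedColormap('custom_cubehelix',
#                                            cubehelix(gamma=0.8, s=0.3, r=1.0, h=1.5))
#     register_cmap(cmap=custom_helix)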
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# Gnuplot palette functions
gfunc = {
0: lambda x: 0,
1: lambda x: 0.5,
2: lambda x: 1,
3: lambda x: x,
4: lambda x: x ** 2,
5: lambda x: x ** 3,
6: lambda x: x ** 4,
7: lambda x: np.sqrt(x),
8: lambda x: np.sqrt(np.sqrt(x)),
9: lambda x: np.sin(x * np.pi / 2),
10: lambda x: np.cos(x * np.pi / 2),
11: lambda x: np.abs(x - 0.5),
12: lambda x: (2 * x - 1) ** 2,
13: lambda x: np.sin(x * np.pi),
14: lambda x: np.abs(np.cos(x * np.pi)),
15: lambda x: np.sin(x * 2 * np.pi),
16: lambda x: np.cos(x * 2 * np.pi),
17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
19: lambda x: np.abs(np.sin(x * 4 * np.pi)),
20: lambda x: np.abs(np.cos(x * 4 * np.pi)),
21: lambda x: 3 * x,
22: lambda x: 3 * x - 1,
23: lambda x: 3 * x - 2,
24: lambda x: np.abs(3 * x - 1),
25: lambda x: np.abs(3 * x - 2),
26: lambda x: (3 * x - 1) / 2,
27: lambda x: (3 * x - 2) / 2,
28: lambda x: np.abs((3 * x - 1) / 2),
29: lambda x: np.abs((3 * x - 2) / 2),
30: lambda x: x / 0.32 - 0.78125,
31: lambda x: 2 * x - 0.84,
32: lambda x: gfunc32(x),
33: lambda x: np.abs(2 * x - 0.5),
34: lambda x: 2 * x,
35: lambda x: 2 * x - 0.5,
36: lambda x: 2 * x - 1.
}
def gfunc32(x):
ret = np.zeros(len(x))
m = (x < 0.25)
ret[m] = 4 * x[m]
m = (x >= 0.25) & (x < 0.92)
ret[m] = -2 * x[m] + 1.84
m = (x >= 0.92)
ret[m] = x[m] / 0.08 - 11.5
return ret
_gnuplot_data = {
'red': gfunc[7],
'green': gfunc[5],
'blue': gfunc[15],
}
_gnuplot2_data = {
'red': gfunc[30],
'green': gfunc[31],
'blue': gfunc[32],
}
_ocean_data = {
'red': gfunc[23],
'green': gfunc[28],
'blue': gfunc[3],
}
_afmhot_data = {
'red': gfunc[34],
'green': gfunc[35],
'blue': gfunc[36],
}
_rainbow_data = {
'red': gfunc[33],
'green': gfunc[13],
'blue': gfunc[10],
}
_seismic_data = (
(0.0, 0.0, 0.3), (0.0, 0.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 0.0, 0.0),
(0.5, 0.0, 0.0))
_terrain_data = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0)))
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),
(0.365079, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.746032, 0.000000, 0.000000),
(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),
(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),
(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),
(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),
(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),
(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),
(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),
(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
(0.91, 0, 0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1),
(0.65, 0, 0), (1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),
(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),
(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),
(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),
(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),
(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),
(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),
(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),
(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),
(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),
(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),
(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),
(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),
(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),
(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),
(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),
(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),
(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),
(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),
(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),
(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),
(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),
(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),
(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),
(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),
(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),
(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),
(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),
(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),
(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),
(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),
(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),
(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),
(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),
(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),
(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),
(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),
(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),
(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),
(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),
(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),
(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),
(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),
(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),
(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),
(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),
(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),
(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),
(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),
(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),
(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),
(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),
(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))}
_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))}
_nipy_spectral_data = {
'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
}
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-stype license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
#
# Most palette functions have been reduced to simple function descriptions
# by Reinier Heeres, since the rgb components were mostly straight lines.
# gist_earth_data and gist_ncar_data were simplified by a script and some
# manual effort.
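# Illustrative sketch (hypothetical helper, not a matplotlib API): some of the
# gist_* channels below are plain callables of x in [0, 1] rather than anchor
# tuples (see _gist_gray_data, _gist_heat_data and _gist_yarg_data); their
# outputs can fall outside [0, 1] and are clipped to that range when the
# colormap is built.
def _sample_channel(channel, n=5):
    """Evaluate a callable channel at n evenly spaced points, clipped to [0, 1]."""
    xs = [i / float(n - 1) for i in range(n)]
    return [min(1.0, max(0.0, channel(x))) for x in xs]
# Example: _sample_channel(_gist_heat_data['green'])  # -> [0.0, 0.0, 0.0, 0.5, 1.0]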
_gist_earth_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.2824, 0.1882, 0.1882),
(0.4588, 0.2714, 0.2714),
(0.5490, 0.4719, 0.4719),
(0.6980, 0.7176, 0.7176),
(0.7882, 0.7553, 0.7553),
(1.0000, 0.9922, 0.9922),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0275, 0.0000, 0.0000),
(0.1098, 0.1893, 0.1893),
(0.1647, 0.3035, 0.3035),
(0.2078, 0.3841, 0.3841),
(0.2824, 0.5020, 0.5020),
(0.5216, 0.6397, 0.6397),
(0.6980, 0.7171, 0.7171),
(0.7882, 0.6392, 0.6392),
(0.7922, 0.6413, 0.6413),
(0.8000, 0.6447, 0.6447),
(0.8078, 0.6481, 0.6481),
(0.8157, 0.6549, 0.6549),
(0.8667, 0.6991, 0.6991),
(0.8745, 0.7103, 0.7103),
(0.8824, 0.7216, 0.7216),
(0.8902, 0.7323, 0.7323),
(0.8980, 0.7430, 0.7430),
(0.9412, 0.8275, 0.8275),
(0.9569, 0.8635, 0.8635),
(0.9647, 0.8816, 0.8816),
(0.9961, 0.9733, 0.9733),
(1.0000, 0.9843, 0.9843),
), 'blue': (
(0.0, 0.0, 0.0000),
(0.0039, 0.1684, 0.1684),
(0.0078, 0.2212, 0.2212),
(0.0275, 0.4329, 0.4329),
(0.0314, 0.4549, 0.4549),
(0.2824, 0.5004, 0.5004),
(0.4667, 0.2748, 0.2748),
(0.5451, 0.3205, 0.3205),
(0.7843, 0.3961, 0.3961),
(0.8941, 0.6651, 0.6651),
(1.0000, 0.9843, 0.9843),
)}
_gist_gray_data = {
'red': gfunc[3],
'green': gfunc[3],
'blue': gfunc[3],
}
_gist_heat_data = {
'red': lambda x: 1.5 * x,
'green': lambda x: 2 * x - 1,
'blue': lambda x: 4 * x - 3,
}
_gist_ncar_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.3098, 0.0000, 0.0000),
(0.3725, 0.3993, 0.3993),
(0.4235, 0.5003, 0.5003),
(0.5333, 1.0000, 1.0000),
(0.7922, 1.0000, 1.0000),
(0.8471, 0.6218, 0.6218),
(0.8980, 0.9235, 0.9235),
(1.0000, 0.9961, 0.9961),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0510, 0.3722, 0.3722),
(0.1059, 0.0000, 0.0000),
(0.1569, 0.7202, 0.7202),
(0.1608, 0.7537, 0.7537),
(0.1647, 0.7752, 0.7752),
(0.2157, 1.0000, 1.0000),
(0.2588, 0.9804, 0.9804),
(0.2706, 0.9804, 0.9804),
(0.3176, 1.0000, 1.0000),
(0.3686, 0.8081, 0.8081),
(0.4275, 1.0000, 1.0000),
(0.5216, 1.0000, 1.0000),
(0.6314, 0.7292, 0.7292),
(0.6863, 0.2796, 0.2796),
(0.7451, 0.0000, 0.0000),
(0.7922, 0.0000, 0.0000),
(0.8431, 0.1753, 0.1753),
(0.8980, 0.5000, 0.5000),
(1.0000, 0.9725, 0.9725),
), 'blue': (
(0.0, 0.5020, 0.5020),
(0.0510, 0.0222, 0.0222),
(0.1098, 1.0000, 1.0000),
(0.2039, 1.0000, 1.0000),
(0.2627, 0.6145, 0.6145),
(0.3216, 0.0000, 0.0000),
(0.4157, 0.0000, 0.0000),
(0.4745, 0.2342, 0.2342),
(0.5333, 0.0000, 0.0000),
(0.5804, 0.0000, 0.0000),
(0.6314, 0.0549, 0.0549),
(0.6902, 0.0000, 0.0000),
(0.7373, 0.0000, 0.0000),
(0.7922, 0.9738, 0.9738),
(0.8000, 1.0000, 1.0000),
(0.8431, 1.0000, 1.0000),
(0.8980, 0.9341, 0.9341),
(1.0000, 0.9961, 0.9961),
)}
_gist_rainbow_data = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
_gist_stern_data = {
'red': (
(0.000, 0.000, 0.000), (0.0547, 1.000, 1.000),
(0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250),
(1.000, 1.000, 1.000)),
'green': ((0, 0, 0), (1, 1, 1)),
'blue': (
(0.000, 0.000, 0.000), (0.500, 1.000, 1.000),
(0.735, 0.000, 0.000), (1.000, 1.000, 1.000))
}
_gist_yarg_data = {
'red': lambda x: 1 - x,
'green': lambda x: 1 - x,
'blue': lambda x: 1 - x,
}
# This bipolar color map was generated from CoolWarmFloat33.csv of
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland.
# <http://www.kennethmoreland.com/color-maps/>
_coolwarm_data = {
'red': [
(0.0, 0.2298057, 0.2298057),
(0.03125, 0.26623388, 0.26623388),
(0.0625, 0.30386891, 0.30386891),
(0.09375, 0.342804478, 0.342804478),
(0.125, 0.38301334, 0.38301334),
(0.15625, 0.424369608, 0.424369608),
(0.1875, 0.46666708, 0.46666708),
(0.21875, 0.509635204, 0.509635204),
(0.25, 0.552953156, 0.552953156),
(0.28125, 0.596262162, 0.596262162),
(0.3125, 0.639176211, 0.639176211),
(0.34375, 0.681291281, 0.681291281),
(0.375, 0.722193294, 0.722193294),
(0.40625, 0.761464949, 0.761464949),
(0.4375, 0.798691636, 0.798691636),
(0.46875, 0.833466556, 0.833466556),
(0.5, 0.865395197, 0.865395197),
(0.53125, 0.897787179, 0.897787179),
(0.5625, 0.924127593, 0.924127593),
(0.59375, 0.944468518, 0.944468518),
(0.625, 0.958852946, 0.958852946),
(0.65625, 0.96732803, 0.96732803),
(0.6875, 0.969954137, 0.969954137),
(0.71875, 0.966811177, 0.966811177),
(0.75, 0.958003065, 0.958003065),
(0.78125, 0.943660866, 0.943660866),
(0.8125, 0.923944917, 0.923944917),
(0.84375, 0.89904617, 0.89904617),
(0.875, 0.869186849, 0.869186849),
(0.90625, 0.834620542, 0.834620542),
(0.9375, 0.795631745, 0.795631745),
(0.96875, 0.752534934, 0.752534934),
(1.0, 0.705673158, 0.705673158)],
'green': [
(0.0, 0.298717966, 0.298717966),
(0.03125, 0.353094838, 0.353094838),
(0.0625, 0.406535296, 0.406535296),
(0.09375, 0.458757618, 0.458757618),
(0.125, 0.50941904, 0.50941904),
(0.15625, 0.558148092, 0.558148092),
(0.1875, 0.604562568, 0.604562568),
(0.21875, 0.648280772, 0.648280772),
(0.25, 0.688929332, 0.688929332),
(0.28125, 0.726149107, 0.726149107),
(0.3125, 0.759599947, 0.759599947),
(0.34375, 0.788964712, 0.788964712),
(0.375, 0.813952739, 0.813952739),
(0.40625, 0.834302879, 0.834302879),
(0.4375, 0.849786142, 0.849786142),
(0.46875, 0.860207984, 0.860207984),
(0.5, 0.86541021, 0.86541021),
(0.53125, 0.848937047, 0.848937047),
(0.5625, 0.827384882, 0.827384882),
(0.59375, 0.800927443, 0.800927443),
(0.625, 0.769767752, 0.769767752),
(0.65625, 0.734132809, 0.734132809),
(0.6875, 0.694266682, 0.694266682),
(0.71875, 0.650421156, 0.650421156),
(0.75, 0.602842431, 0.602842431),
(0.78125, 0.551750968, 0.551750968),
(0.8125, 0.49730856, 0.49730856),
(0.84375, 0.439559467, 0.439559467),
(0.875, 0.378313092, 0.378313092),
(0.90625, 0.312874446, 0.312874446),
(0.9375, 0.24128379, 0.24128379),
(0.96875, 0.157246067, 0.157246067),
(1.0, 0.01555616, 0.01555616)],
'blue': [
(0.0, 0.753683153, 0.753683153),
(0.03125, 0.801466763, 0.801466763),
(0.0625, 0.84495867, 0.84495867),
(0.09375, 0.883725899, 0.883725899),
(0.125, 0.917387822, 0.917387822),
(0.15625, 0.945619588, 0.945619588),
(0.1875, 0.968154911, 0.968154911),
(0.21875, 0.98478814, 0.98478814),
(0.25, 0.995375608, 0.995375608),
(0.28125, 0.999836203, 0.999836203),
(0.3125, 0.998151185, 0.998151185),
(0.34375, 0.990363227, 0.990363227),
(0.375, 0.976574709, 0.976574709),
(0.40625, 0.956945269, 0.956945269),
(0.4375, 0.931688648, 0.931688648),
(0.46875, 0.901068838, 0.901068838),
(0.5, 0.865395561, 0.865395561),
(0.53125, 0.820880546, 0.820880546),
(0.5625, 0.774508472, 0.774508472),
(0.59375, 0.726736146, 0.726736146),
(0.625, 0.678007945, 0.678007945),
(0.65625, 0.628751763, 0.628751763),
(0.6875, 0.579375448, 0.579375448),
(0.71875, 0.530263762, 0.530263762),
(0.75, 0.481775914, 0.481775914),
(0.78125, 0.434243684, 0.434243684),
(0.8125, 0.387970225, 0.387970225),
(0.84375, 0.343229596, 0.343229596),
(0.875, 0.300267182, 0.300267182),
(0.90625, 0.259301199, 0.259301199),
(0.9375, 0.220525627, 0.220525627),
(0.96875, 0.184115123, 0.184115123),
(1.0, 0.150232812, 0.150232812)]
}
# Implementation of Carey Rappaport's CMRmap.
# See `A Color Map for Effective Black-and-White Rendering of Color-Scale
# Images' by Carey Rappaport
# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m
_CMRmap_data = {'red': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.30, 0.30),
(0.375, 0.60, 0.60),
(0.500, 1.00, 1.00),
(0.625, 0.90, 0.90),
(0.750, 0.90, 0.90),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'green': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.15, 0.15),
(0.375, 0.20, 0.20),
(0.500, 0.25, 0.25),
(0.625, 0.50, 0.50),
(0.750, 0.75, 0.75),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'blue': ((0.000, 0.00, 0.00),
(0.125, 0.50, 0.50),
(0.250, 0.75, 0.75),
(0.375, 0.50, 0.50),
(0.500, 0.15, 0.15),
(0.625, 0.00, 0.00),
(0.750, 0.10, 0.10),
(0.875, 0.50, 0.50),
(1.000, 1.00, 1.00))}
# An MIT licensed, colorblind-friendly heatmap from Wistia:
# https://github.com/wistia/heatmap-palette
# http://wistia.com/blog/heatmaps-for-colorblindness
#
# >>> import matplotlib.colors as c
# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"]
# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors)
# >>> _wistia_data = cm._segmentdata
# >>> del _wistia_data['alpha']
#
_wistia_data = {
'red': [(0.0, 0.8941176470588236, 0.8941176470588236),
(0.25, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.9882352941176471, 0.9882352941176471)],
'green': [(0.0, 1.0, 1.0),
(0.25, 0.9098039215686274, 0.9098039215686274),
(0.5, 0.7411764705882353, 0.7411764705882353),
(0.75, 0.6274509803921569, 0.6274509803921569),
(1.0, 0.4980392156862745, 0.4980392156862745)],
'blue': [(0.0, 0.47843137254901963, 0.47843137254901963),
(0.25, 0.10196078431372549, 0.10196078431372549),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0)],
}
datad = {
'afmhot': _afmhot_data,
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'bwr': _bwr_data,
'brg': _brg_data,
'CMRmap': _CMRmap_data,
'cool': _cool_data,
'copper': _copper_data,
'cubehelix': _cubehelix_data,
'flag': _flag_data,
'gnuplot': _gnuplot_data,
'gnuplot2': _gnuplot2_data,
'gray': _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet': _jet_data,
'ocean': _ocean_data,
'pink': _pink_data,
'prism': _prism_data,
'rainbow': _rainbow_data,
'seismic': _seismic_data,
'spring': _spring_data,
'summer': _summer_data,
'terrain': _terrain_data,
'winter': _winter_data,
'nipy_spectral': _nipy_spectral_data,
'spectral': _nipy_spectral_data, # alias for backward compatibility
}
datad['Accent'] = _Accent_data
datad['Blues'] = _Blues_data
datad['BrBG'] = _BrBG_data
datad['BuGn'] = _BuGn_data
datad['BuPu'] = _BuPu_data
datad['Dark2'] = _Dark2_data
datad['GnBu'] = _GnBu_data
datad['Greens'] = _Greens_data
datad['Greys'] = _Greys_data
datad['Oranges'] = _Oranges_data
datad['OrRd'] = _OrRd_data
datad['Paired'] = _Paired_data
datad['Pastel1'] = _Pastel1_data
datad['Pastel2'] = _Pastel2_data
datad['PiYG'] = _PiYG_data
datad['PRGn'] = _PRGn_data
datad['PuBu'] = _PuBu_data
datad['PuBuGn'] = _PuBuGn_data
datad['PuOr'] = _PuOr_data
datad['PuRd'] = _PuRd_data
datad['Purples'] = _Purples_data
datad['RdBu'] = _RdBu_data
datad['RdGy'] = _RdGy_data
datad['RdPu'] = _RdPu_data
datad['RdYlBu'] = _RdYlBu_data
datad['RdYlGn'] = _RdYlGn_data
datad['Reds'] = _Reds_data
datad['Set1'] = _Set1_data
datad['Set2'] = _Set2_data
datad['Set3'] = _Set3_data
datad['Spectral'] = _Spectral_data
datad['YlGn'] = _YlGn_data
datad['YlGnBu'] = _YlGnBu_data
datad['YlOrBr'] = _YlOrBr_data
datad['YlOrRd'] = _YlOrRd_data
datad['gist_earth'] = _gist_earth_data
datad['gist_gray'] = _gist_gray_data
datad['gist_heat'] = _gist_heat_data
datad['gist_ncar'] = _gist_ncar_data
datad['gist_rainbow'] = _gist_rainbow_data
datad['gist_stern'] = _gist_stern_data
datad['gist_yarg'] = _gist_yarg_data
datad['coolwarm'] = _coolwarm_data
datad['Wistia'] = _wistia_data
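# A minimal, hedged sketch (not part of this module) showing how one of the
# segmentdata dictionaries above can be turned into a working colormap; the
# name 'coolwarm_demo' and the random image are illustrative assumptions:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   import matplotlib.pyplot as plt
#   import numpy as np
#
#   cmap = LinearSegmentedColormap('coolwarm_demo', _coolwarm_data, N=256)
#   plt.imshow(np.random.rand(16, 16), cmap=cmap)
#   plt.colorbar()
#   plt.show()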
| mit |
JsNoNo/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
vortex-ape/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
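# A small, hedged follow-up that is not part of the original example: the gamma
# maximizing the mean cross-validation score can be read from the arrays above.
#
#   best_gamma = param_range[np.argmax(test_scores_mean)]
#   print("gamma with best cross-validation score: %g" % best_gamma)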
| bsd-3-clause |
mrustl/flopy | flopy/utils/datafile.py | 1 | 17018 | """
Module to read MODFLOW output files. The module contains shared
abstract classes that should not be directly accessed.
"""
from __future__ import print_function
import os
import numpy as np
import flopy.utils
class Header(object):
"""
The header class is an abstract base class to create headers for MODFLOW files
"""
def __init__(self, filetype=None, precision='single'):
floattype = 'f4'
if precision == 'double':
floattype = 'f8'
self.header_types = ['head', 'ucn']
if filetype is None:
self.header_type = None
else:
self.header_type = filetype.lower()
if self.header_type in self.header_types:
if self.header_type == 'head':
self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'),
('pertim', floattype),
('totim', floattype),
('text', 'a16'),
('ncol', 'i4'), ('nrow', 'i4'),
('ilay', 'i4')])
elif self.header_type == 'ucn':
self.dtype = np.dtype(
[('ntrans', 'i4'), ('kstp', 'i4'), ('kper', 'i4'),
('totim', floattype), ('text', 'a16'),
('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')])
self.header = np.ones(1, self.dtype)
else:
self.dtype = None
self.header = None
print(
'Specified {0} type is not available. Available types are:'.format(
self.header_type))
for idx, t in enumerate(self.header_types):
print(' {0} {1}'.format(idx + 1, t))
return
def get_dtype(self):
"""
Return the dtype
"""
return self.dtype
def get_names(self):
"""
Return the dtype names
"""
return self.dtype.names
def get_values(self):
"""
Return the header values
"""
if self.header is None:
return None
else:
return self.header[0]
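# Hedged usage sketch for Header (values follow the 'head' dtype defined above;
# this snippet is illustrative and not part of the original module):
#
#   hdr = Header(filetype='head', precision='single')
#   hdr.get_names()
#   # ('kstp', 'kper', 'pertim', 'totim', 'text', 'ncol', 'nrow', 'ilay')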
class LayerFile(object):
"""
The LayerFile class is the abstract base class from which specific derived
    classes are formed. This class should not be instantiated directly.
"""
def __init__(self, filename, precision, verbose, kwargs):
assert os.path.exists(
filename), "datafile error: datafile not found:" + str(filename)
self.filename = filename
self.precision = precision
self.verbose = verbose
self.file = open(self.filename, 'rb')
self.nrow = 0
self.ncol = 0
self.nlay = 0
self.times = []
self.kstpkper = []
self.recordarray = []
self.iposarray = []
if precision == 'single':
self.realtype = np.float32
elif precision == 'double':
self.realtype = np.float64
else:
raise Exception('Unknown precision specified: ' + precision)
self.model = None
self.dis = None
self.sr = None
if 'model' in kwargs.keys():
self.model = kwargs.pop('model')
self.sr = self.model.sr
self.dis = self.model.dis
if 'dis' in kwargs.keys():
self.dis = kwargs.pop('dis')
self.sr = self.dis.parent.sr
if 'sr' in kwargs.keys():
self.sr = kwargs.pop('sr')
if len(kwargs.keys()) > 0:
args = ','.join(kwargs.keys())
raise Exception('LayerFile error: unrecognized kwargs: ' + args)
# read through the file and build the pointer index
self._build_index()
# now that we read the data and know nrow and ncol,
# we can make a generic sr if needed
if self.sr is None:
self.sr = flopy.utils.SpatialReference(np.ones(self.ncol),
np.ones(self.nrow), 0)
return
def to_shapefile(self, filename, kstpkper=None, totim=None, mflay=None,
attrib_name='lf_data'):
"""
Export model output data to a shapefile at a specific location
in LayerFile instance.
Parameters
----------
filename : str
Shapefile name to write
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : integer
MODFLOW zero-based layer number to return. If None, then layer 1
will be written
attrib_name : str
Base name of attribute columns. (default is 'lf_data')
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> times = hdobj.get_times()
>>> hdobj.to_shapefile('test_heads_sp6.shp', totim=times[-1])
"""
plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
totim=totim, mflay=mflay)
.transpose()).transpose()
if mflay != None:
attrib_dict = {
attrib_name + '{0:03d}'.format(mflay): plotarray[0, :, :]}
else:
attrib_dict = {}
for k in range(plotarray.shape[0]):
name = attrib_name + '{0:03d}'.format(k)
attrib_dict[name] = plotarray[k]
from flopy.export.shapefile_utils import write_grid_shapefile
write_grid_shapefile(filename, self.sr, attrib_dict)
def plot(self, axes=None, kstpkper=None, totim=None, mflay=None,
filename_base=None, **kwargs):
'''
Plot 3-D model output data in a specific location
in LayerFile instance
Parameters
----------
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : int
MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (default is None)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
**kwargs : dict
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> hdobj = flopy.utils.HeadFile('test.hds')
>>> times = hdobj.get_times()
>>> hdobj.plot(totim=times[-1])
'''
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
masked_values = kwargs.pop("masked_values", [])
if self.model is not None:
if self.model.bas6 is not None:
masked_values.append(self.model.bas6.hnoflo)
kwargs["masked_values"] = masked_values
filenames = None
if filename_base is not None:
if mflay is not None:
i0 = int(mflay)
if i0 + 1 >= self.nlay:
i0 = self.nlay - 1
i1 = i0 + 1
else:
i0 = 0
i1 = self.nlay
filenames = []
[filenames.append(
'{}_Layer{}.{}'.format(filename_base, k + 1, fext)) for k in
range(i0, i1)]
# make sure we have a (lay,row,col) shape plotarray
plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
totim=totim, mflay=mflay)
.transpose()).transpose()
import flopy.plot.plotutil as pu
return pu._plot_array_helper(plotarray, model=self.model, sr=self.sr,
axes=axes,
filenames=filenames,
mflay=mflay, **kwargs)
def _build_index(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception(
'Abstract method _build_index called in LayerFile. This method needs to be overridden.')
def list_records(self):
"""
Print a list of all of the records in the file
obj.list_records()
"""
for header in self.recordarray:
print(header)
return
def _get_data_array(self, totim=0):
"""
Get the three dimensional data array for the
specified kstp and kper value or totim value.
"""
if totim > 0.:
keyindices = np.where((self.recordarray['totim'] == totim))[0]
else:
raise Exception('Data not found...')
# initialize head with nan and then fill it
data = np.empty((self.nlay, self.nrow, self.ncol),
dtype=self.realtype)
data[:, :, :] = np.nan
for idx in keyindices:
ipos = self.iposarray[idx]
ilay = self.recordarray['ilay'][idx]
if self.verbose:
print('Byte position in file: {0}'.format(ipos))
self.file.seek(ipos, 0)
data[ilay - 1, :, :] = self._read_data()
return data
def get_times(self):
"""
Get a list of unique times in the file
Returns
----------
out : list of floats
List contains unique simulation times (totim) in binary file.
"""
return self.times
def get_kstpkper(self):
"""
Get a list of unique stress periods and time steps in the file
Returns
----------
out : list of (kstp, kper) tuples
List of unique kstp, kper combinations in binary file. kstp and
kper values are presently zero-based.
"""
kstpkper = []
for kstp, kper in self.kstpkper:
kstpkper.append((kstp - 1, kper - 1))
return kstpkper
def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None):
"""
Get data from the file for the specified conditions.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values.
totim : float
The simulation time.
mflay : integer
MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (Default is None.)
Returns
----------
data : numpy array
Array has size (nlay, nrow, ncol) if mflay is None or it has size
            (nrow, ncol) if mflay is specified.
See Also
--------
Notes
-----
if both kstpkper and totim are None, will return the last entry
Examples
--------
"""
# One-based kstp and kper for pulling out of recarray
if kstpkper is not None:
kstp1 = kstpkper[0] + 1
kper1 = kstpkper[1] + 1
idx = np.where(
(self.recordarray['kstp'] == kstp1) &
(self.recordarray['kper'] == kper1))
if idx[0].shape[0] == 0:
raise Exception("get_data() error: kstpkper not found:{0}".
format(kstpkper))
totim1 = self.recordarray[idx]["totim"][0]
elif totim is not None:
totim1 = totim
elif idx is not None:
totim1 = self.recordarray['totim'][idx]
else:
totim1 = self.times[-1]
data = self._get_data_array(totim1)
if mflay is None:
return data
else:
return data[mflay, :, :]
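    # Hedged usage sketch (assumes a concrete subclass such as
    # flopy.utils.HeadFile and an existing binary head file 'example.hds';
    # neither is part of this abstract class):
    #
    #   hds = flopy.utils.HeadFile('example.hds')
    #   heads = hds.get_data(totim=hds.get_times()[-1])   # (nlay, nrow, ncol)
    #   top = hds.get_data(kstpkper=(0, 0), mflay=0)      # (nrow, ncol)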
def get_alldata(self, mflay=None, nodata=-9999):
"""
Get all of the data from the file.
Parameters
----------
mflay : integer
MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (Default is None.)
nodata : float
The nodata value in the data array. All array values that have the
nodata value will be assigned np.nan.
Returns
----------
data : numpy array
Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it
            has size (ntimes, nrow, ncol) if mflay is specified.
See Also
--------
Notes
-----
Examples
--------
"""
rv = []
for totim in self.times:
h = self.get_data(totim=totim, mflay=mflay)
rv.append(h)
rv = np.array(rv)
rv[rv == nodata] = np.nan
return rv
def _read_data(self):
"""
Read data from file
"""
raise Exception(
'Abstract method _read_data called in LayerFile. This method needs to be overridden.')
def _build_kijlist(self, idx):
if isinstance(idx, list):
kijlist = idx
elif isinstance(idx, tuple):
kijlist = [idx]
# Check to make sure that k, i, j are within range, otherwise
# the seek approach won't work. Can't use k = -1, for example.
for k, i, j in kijlist:
fail = False
errmsg = 'Invalid cell index. Cell ' + str(
(k, i, j)) + ' not within model grid: ' + \
str((self.nlay, self.nrow, self.ncol))
if k < 0 or k > self.nlay - 1:
fail = True
if i < 0 or i > self.nrow - 1:
fail = True
if j < 0 or j > self.ncol - 1:
fail = True
if fail:
raise Exception(errmsg)
return kijlist
def _get_nstation(self, idx, kijlist):
if isinstance(idx, list):
return len(kijlist)
elif isinstance(idx, tuple):
return 1
def _init_result(self, nstation):
# Initialize result array and put times in first column
result = np.empty((len(self.times), nstation + 1),
dtype=self.realtype)
result[:, :] = np.nan
result[:, 0] = np.array(self.times)
return result
def close(self):
"""
Close the file handle.
"""
self.file.close()
return
| bsd-3-clause |
apriha/lineage | tests/test_individual.py | 1 | 2743 | """
MIT License
Copyright (c) 2017 Andrew Riha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
from snps import SNPs
from tests import BaseLineageTestCase
class TestIndividual(BaseLineageTestCase):
def test_name(self):
ind = self.l.create_individual("test")
assert ind.name == "test"
def test_get_var_name(self):
ind = self.l.create_individual("test a. name")
assert ind.get_var_name() == "test_a__name"
def test___repr__(self):
ind = self.l.create_individual("test")
assert "Individual('test')" == ind.__repr__()
def test_load_path(self):
ind = self.l.create_individual("test", "tests/input/generic.csv")
pd.testing.assert_frame_equal(ind.snps, self.generic_snps(), check_exact=True)
def test_load_SNPs(self):
s = SNPs("tests/input/generic.csv")
ind = self.l.create_individual("test", s)
pd.testing.assert_frame_equal(ind.snps, self.generic_snps(), check_exact=True)
def test_load_list_bytes(self):
with open("tests/input/generic.csv", "rb") as f:
data = f.read()
ind = self.l.create_individual("test", [SNPs(), data])
pd.testing.assert_frame_equal(ind.snps, self.generic_snps(), check_exact=True)
def test_load_resource_output_dirs(self):
ind = self.l.create_individual(
"test",
"tests/input/generic.csv",
output_dir="output1",
resources_dir="resources1",
)
self.assertEqual(self.l._output_dir, "output")
self.assertEqual(self.l._resources_dir, "resources")
self.assertEqual(ind._output_dir, "output1")
pd.testing.assert_frame_equal(ind.snps, self.generic_snps(), check_exact=True)
| gpl-3.0 |
azmainamin/cats_v_dogs_neural_networks | prepare_data.py | 1 | 7672 | import cPickle as pickle
import numpy as np
import timeit
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv2d
import matplotlib.pyplot as plt
from matplotlib import cm
from sys import version_info
import os
from PIL import Image
import gc
CAT = float(0)
DOG = float(1)
"""
Created on Wed May 04 19:21:48 2016
Preparing the train, test and validation datasets.
@author: aminm
"""
def pickleToGrayscale(pathname):
"""
Input: Pickled images.
Output: Pickled grayscale images separated into 3 batches: train, test, validation.
"""
batch = pickle.load(open(pathname,'rb'))
grayscale = []
    #code below rearranges the pixel RGB values. Originally, the array is in the
    #pattern RGBRGBRGB...; the loop regroups it so all R's come first, then all
    #G's, then all B's. A vectorized equivalent is sketched after this function.
print "Reading the images..."
for image in batch:
stepper = np.arange(0, 3072, 3)
stepperList = []
for x in stepper:
stepperList.append(image[x])
stepper += 1
for x in stepper:
stepperList.append(image[x])
stepper += 1
for x in stepper:
stepperList.append(image[x])
stepperList = np.array(stepperList)
gray = 0.21*stepperList[0:1024] + 0.72*stepperList[1024:2048] + 0.07*stepperList[2048:3072]
grayscale.append(gray)
grayscale = np.asarray(grayscale)
# Train: 6000 cats + 6000 dogs
# Test: 4000 cats + 4000 dogs
# Validation: 1250 cats + 1250 dogs
train = np.concatenate((grayscale[:6000], grayscale[12500:18500]), axis = 0)
test = np.concatenate((grayscale[6000:10000] , grayscale[18500:22500]), axis = 0)
valid = np.concatenate((grayscale[10000:12500],grayscale[22500:25000]), axis = 0)
with open("32_grayscale_train_img.pkl","wb") as f:
print "Pickling the training images ..."
pickle.dump(train, f)
with open("32_grayscale_test_img.pkl","wb") as f:
print "Pickling the testing images ..."
pickle.dump(test, f)
with open("32_grayscale_valid_img.pkl","wb") as f:
print "Pickling the validation images ..."
pickle.dump(valid, f)
f.close()
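# A minimal vectorized sketch of the same RGBRGB -> grayscale conversion used in
# pickleToGrayscale (assumes `image` is one flat length-3072 pixel array):
#
#   rgb = image.reshape(1024, 3).astype(float)
#   gray = 0.21 * rgb[:, 0] + 0.72 * rgb[:, 1] + 0.07 * rgb[:, 2]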
def displayImageFromArray(pathname, index):
"""
Input: A pickled image file.
Output: Displays the image and label at index
"""
print "Getting image..."
labels = pickle.load(open("train_labels.pkl","rb"))
label = labels[index]
# print label
images = pickle.load(open(pathname,'rb'))
image = images[index]
image = Image.fromarray(image.reshape((32,32)))
plt.imshow(image,cmap = cm.gray)
plt.show()
def get_imlist(path):
"""returns a list of filenames for all jpg images in a directory"""
return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpg')]
def create_db(img_pkl, labels_pkl, size):
"""
Input: JPGs from a directory.
    Output: Pickles the resized images and labels as np.ndarray in two separate files.
"""
batch = []
labels = []
print "Reading all the images..."
for file_name in get_imlist(os.getcwd() + "/train"):
if "dog" in file_name:
label = DOG
else:
label = CAT
imgx = Image.open(file_name)
labels.append(label)
imgx = imgx.resize((size,size))
imag = np.asarray(imgx)
imag = np.ravel(imag)
batch.append(imag)
#batch = np.asarray(batch)
labels = np.asarray(labels)
# print label
print "Reading images done..."
#Pickle all 25K images in one pickle file
with open(img_pkl,"wb") as f:
print "Pickling the images ..."
pickle.dump(batch,f,-1)
gc.collect()
f.close()
#Pickle all labels in one file.
with open("labels.pkl", "wb") as f:
print "Pickling labels ..."
pickle.dump(labels,f,-1)
f.close()
def separate_labels(labels_path):
"""
Input: a pickled label file
Output: 3 separate labels for train, test and validation
"""
labels = pickle.load(open(labels_path,'rb'))
train_label = np.concatenate((labels[:6000],labels[12500:18500]),axis =0)
test_label = np.concatenate((labels[6000:10000],labels[18500:22500]),axis = 0)
valid_label = np.concatenate((labels[10000:12500],labels[22500:25000]), axis=0)
with open("train_labels.pkl", "wb") as f:
print "Pickling train labels ..."
pickle.dump(train_label,f,-1)
with open("test_labels.pkl", "wb") as f:
print "Pickling test labels ..."
pickle.dump(test_label,f,-1)
with open("valid_labels.pkl", "wb") as f:
print "Pickling test labels ..."
pickle.dump(valid_label,f,-1)
f.close()
def load_data():
"""
Loads pickled images and their corresponding labels and returns a list of tuples of Theano
shared variables.
"""
train_images = pickle.load(open("32_grayscale_train_img.pkl", "rb"))
# print len(train_images)
train_labels = pickle.load(open("train_labels.pkl", "rb"))
# print len(train_labels)
valid_images = pickle.load(open("32_grayscale_valid_img.pkl", "rb"))
valid_labels = pickle.load(open("valid_labels.pkl", "rb"))
test_images = pickle.load(open("32_grayscale_test_img.pkl", "rb"))
test_labels = pickle.load(open("test_labels.pkl", "rb"))
train_set = (train_images, train_labels)
valid_set = (valid_images, valid_labels)
test_set = (test_images, test_labels)
#return train_set, valid_set, test_set
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def main():
'''
This little test script will display the first image, its name, and its target value
'''
create_db("images_32.pkl", "labels.pkl", 32)
separate_labels("labels.pkl")
DATADIR = os.getcwd()
pickleToGrayscale("images_32.pkl")
displayImageFromArray("32_grayscale_test_img.pkl", 7999)
if __name__ == '__main__':
main()
| gpl-3.0 |
JanetMatsen/meta4_bins_janalysis | network/network.py | 1 | 3545 | #!/usr/bin/env python2.7
'''
Co-occurrence network from expression data.
'''
import os
import pickle
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pandas as pd
import readline
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
from scipy import linalg
from sklearn.covariance import LedoitWolf
DATA_PICKLE = 'data.pkl'
FILENAME = 'normalized_counts.tsv'
PRUNE_GENES = 10000
PDF_FILENAME = 'network.py.pdf'
def main():
'''
    Constructs a co-occurrence network from gene expression data.
Main entry point to code.
'''
# Read in the data
if os.path.isfile(DATA_PICKLE):
print("reading previously saved data from pickle %s" % (DATA_PICKLE))
with open(DATA_PICKLE, 'rb') as file:
df = pickle.load(file)
lwe = pickle.load(file)
pmat = pickle.load(file)
            pcor_indices = pickle.load(file)
pcor = pickle.load(file)
lfdr_pcor = pickle.load(file)
#prob = pickle.load(file)
else:
print("reading in data from %s" % (FILENAME))
df = pd.read_csv(FILENAME, sep='\t')
print("found %d rows and %d columns" % (df.shape[0], df.shape[1]))
        # compute the row means and sort the data frame by descending means
df['row_means'] = df.mean(axis=1)
df.sort_values('row_means', axis=0, ascending=False, inplace=True)
df.drop('row_means', axis=1, inplace=True)
# take the most abundant genes
df = df.head(PRUNE_GENES)
# Ledoit-Wolf optimal shrinkage coefficient estimate
print("computing Ledoit-Wolf optimal shrinkage coeffecient estimate")
lwe = LedoitWolf().fit(df.transpose())
pmat = lwe.get_precision()
# Convert symmetric matrix to array, first by getting indices
# of the off diagonal elements, second by pulling them into
# separate array (pcor).
print("extracting off diagnol elements of precision matrix")
pcor_indices = np.triu_indices(pmat.shape[0], 1)
pcor = pmat[pcor_indices]
# Determine edges by computing lfdr of pcor.
print("computing lfdr of partial correlations")
fdrtool = importr('fdrtool')
lfdr_pcor = fdrtool.fdrtool(FloatVector(pcor), statistic="correlation", plot=False)
#prob = 1-lfdr_pcor['lfdr']
with open(DATA_PICKLE, 'wb') as file:
pickle.dump(df, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(lwe, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(pmat, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(pcor_indices, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(pcor, file, pickle.HIGHEST_PROTOCOL)
pickle.dump(lfdr_pcor, file, pickle.HIGHEST_PROTOCOL)
#pickle.dump(prob, file, pickle.HIGHEST_PROTOCOL)
print("making 1-lfdr vs. pcor plot")
prob = 1-np.array(lfdr_pcor.rx2('lfdr'))
with PdfPages(PDF_FILENAME) as pdf:
plt.figure(figsize=(3, 3))
plt.plot(range(7), [3, 1, 4, 1, 5, 9, 2], 'r-o')
plt.title('Page One')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
plt.plot(pcor[0:10000:10], prob[0:10000:10], 'o', markeredgecolor='k', markersize=3)
plt.title("THIS IS A PLOT TITLE, YOU BET")
plt.xlabel('partial correlation')
plt.ylabel('lfdr')
        pdf.savefig()
plt.close()
if __name__ == "__main__":
main()
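# Hedged sketch (not in the original script) of turning the partial correlations
# into a thresholded edge list; the 0.9 probability cutoff is an illustrative
# assumption, and `pcor_indices`, `pcor`, `prob` and `df` are as built in main():
#
#   rows, cols = pcor_indices
#   keep = prob > 0.9
#   edges = list(zip(df.index[rows[keep]], df.index[cols[keep]], pcor[keep]))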
| bsd-2-clause |
Borillion/mplh5canvas | setup.py | 4 | 1650 | #!/usr/bin/env python
from setuptools import setup, find_packages
from distutils.version import LooseVersion
import os
os.environ['MPLCONFIGDIR'] = "."
# temporarily redirect configuration directory
# to prevent matplotlib import testing for
# writeable directory outside of sandbox
from matplotlib import __version__ as mpl_version
import sys
if LooseVersion(mpl_version) < LooseVersion("0.99.1.1"):
print "The HTML5 Canvas Backend requires matplotlib 0.99.1.1 or newer. " \
"Your version (%s) appears older than this. Unable to continue..." % (mpl_version,)
sys.exit(0)
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
INSTALL = open(os.path.join(here, 'INSTALL.rst')).read()
setup (
name="mplh5canvas",
version="0.7",
author="Simon Ratcliffe, Ludwig Schwardt",
author_email="sratcliffe@ska.ac.za, ludwig@ska.ac.za",
url="http://code.google.com/p/mplh5canvas/",
description="A matplotlib backend based on HTML5 Canvas.",
long_description=README + "\n\n" + INSTALL,
license="BSD",
classifiers=["Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages = find_packages(),
scripts = [],
install_requires = ['matplotlib', 'mod_pywebsocket'],
zip_safe = False,
)
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
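# Hedged usage sketch for load_files; the 'txt_sentoken' folder with 'neg' and
# 'pos' subfolders is an assumed example layout mirroring the docstring above:
#
#   bunch = load_files('txt_sentoken', encoding='utf-8')
#   bunch.target_names          # e.g. ['neg', 'pos']
#   len(bunch.data), bunch.target[:5]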
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
wrichert/BuildingMachineLearningSystemsWithPython | ch05/PosTagFreqVectorizer.py | 27 | 9486 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import re
from operator import itemgetter
from collections import Mapping
import scipy.sparse as sp
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import strip_accents_ascii, strip_accents_unicode
import nltk
from collections import Counter
try:
import ujson as json # UltraJSON if available
except:
import json
poscache_filename = "poscache.json"
class PosCounter(Counter):
def __init__(self, iterable=(), normalize=True, poscache=None, **kwargs):
self.n_sents = 0
self.normalize = normalize
self.poscache = poscache
super(PosCounter, self).__init__(iterable, **kwargs)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
self.n_sents += other.n_sents
for x, n in other.items():
self[x] += n
else:
for sent in other:
self.n_sents += 1
if self.poscache is not None:
if sent in self.poscache:
tags = self.poscache[sent]
else:
self.poscache[sent] = tags = nltk.pos_tag(
nltk.word_tokenize(sent))
else:
tags = nltk.pos_tag(nltk.word_tokenize(sent))
for x in tags:
tok, tag = x
self[tag] += 1
if self.normalize:
for x, n in self.items():
self[x] /= float(self.n_sents)
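# Added usage sketch (not part of the original file; it assumes the NLTK
# "punkt" sentence tokenizer and POS-tagger models have been downloaded):
#
#   counts = PosCounter(["The cat sat.", "Dogs bark loudly."])
#   # counts.n_sents == 2 and, because normalize=True by default, an entry
#   # such as counts['NN'] holds the average number of that tag per sentence
#   # rather than the raw count.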
class PosTagFreqVectorizer(BaseEstimator):
"""
Convert a collection of raw documents to a matrix of POS tag frequencies
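A hedged usage sketch (added; it assumes the NLTK tokenizer and POS-tagger
models are available):

    vect = PosTagFreqVectorizer(normalize=True)
    X = vect.fit_transform(["The cat sat on the mat.", "Dogs bark."])
    # X is a sparse matrix of shape (n_documents, n_pos_tags); the tag
    # names are returned by vect.get_feature_names().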
"""
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
vocabulary=None,
normalize=True,
dtype=float):
self.input = input
self.charset = charset
self.charset_error = charset_error
self.strip_accents = strip_accents
if vocabulary is not None:
self.fixed_vocabulary = True
if not isinstance(vocabulary, Mapping):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
self.vocabulary_ = vocabulary
else:
self.fixed_vocabulary = False
try:
self.poscache = json.load(open(poscache_filename, "r"))
except IOError:
self.poscache = {}
self.normalize = normalize
self.dtype = dtype
def write_poscache(self):
json.dump(self.poscache, open(poscache_filename, "w"))
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
doc = open(doc, 'rb').read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.charset, self.charset_error)
return doc
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However, the cost of a lambda call is a few
# hundred nanoseconds, which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif hasattr(self.strip_accents, '__call__'):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
only_prose = lambda s: re.sub('<[^>]*>', '', s).replace("\n", " ")
return lambda x: strip_accents(only_prose(x))
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
return nltk.sent_tokenize
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
preprocess = self.build_preprocessor()
tokenize = self.build_tokenizer()
return lambda doc: tokenize(preprocess(self.decode(doc)))
def _term_count_dicts_to_matrix(self, term_count_dicts):
i_indices = []
j_indices = []
values = []
vocabulary = self.vocabulary_
for i, term_count_dict in enumerate(term_count_dicts):
for term, count in term_count_dict.items():
j = vocabulary.get(term)
if j is not None:
i_indices.append(i)
j_indices.append(j)
values.append(count)
# free memory as we go
term_count_dict.clear()
shape = (len(term_count_dicts), max(vocabulary.values()) + 1)
spmatrix = sp.csr_matrix((values, (i_indices, j_indices)),
shape=shape, dtype=self.dtype)
return spmatrix
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
if self.fixed_vocabulary:
# No need to fit anything, directly perform the transformation.
# We intentionally don't call the transform method to make it
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer
analyze = self.build_analyzer()
term_counts_per_doc = [PosCounter(analyze(doc), normalize=self.normalize, poscache=self.poscache)
for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
self.vocabulary_ = {}
# result of document conversion to term count dicts
term_counts_per_doc = []
term_counts = Counter()
analyze = self.build_analyzer()
for doc in raw_documents:
term_count_current = PosCounter(
analyze(doc), normalize=self.normalize, poscache=self.poscache)
term_counts.update(term_count_current)
term_counts_per_doc.append(term_count_current)
self.write_poscache()
terms = set(term_counts)
# store map from term name to feature integer index: we sort the term
# to have reproducible outcome for the vocabulary structure: otherwise
# the mapping from feature name to indices might depend on the memory
# layout of the machine. Furthermore sorted terms might make it
# possible to perform binary search in the feature names array.
self.vocabulary_ = dict(((t, i) for i, t in enumerate(sorted(terms))))
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# raw_documents can be an iterable so we don't know its size in
# advance
# XXX @larsmans tried to parallelize the following loop with joblib.
# The result was some 20% slower than the serial version.
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(iter(self.vocabulary_.items()),
key=itemgetter(1))]
| mit |
victorbergelin/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
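# Added illustration (not part of the original test-suite): with
# class_weight="balanced" the weight of class c is
# n_samples / (n_classes * n_c), which is why np.dot(cw, class_counts)
# recovers y.shape[0] in the assertion above. A hand-rolled version:
def _balanced_weights_by_hand(y):
    classes = np.unique(y)
    counts = np.array([np.sum(y == c) for c in classes], dtype=float)
    return len(y) / (len(classes) * counts)
# _balanced_weights_by_hand(np.asarray([2, 2, 2, 3, 3, 4]))
# -> approximately array([0.667, 1.0, 2.0])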
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/tests/test_simplification.py | 2 | 7078 | from __future__ import print_function
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
from matplotlib import patches, path, transforms
from nose.tools import raises
import io
nan = np.nan
Path = path.Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, linewidth=1.0)
ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
x = np.array([1.0,2.0,3.0,2.0e5])
y = np.arange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y)
ax.set_xlim(xmin=2,xmax=6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_xlim(xmin=-0.6, xmax=0.6)
ax.set_ylim(ymin=-0.6, ymax=0.6)
@cleanup
def test_noise():
np.random.seed(0)
x = np.random.uniform(size=(5000,)) * 50
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 3884
@cleanup
def test_sine_plus_noise():
np.random.seed(0)
x = np.sin(np.linspace(0, np.pi * 2.0, 1000)) + np.random.uniform(size=(1000,)) * 0.01
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 876
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
pp1 = patches.PathPatch(
Path([(0, 0), (1, 0), (1, 1), (nan, 1), (0, 0), (2, 0), (2, 2), (0, 0)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
fc="none")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(pp1)
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
ax.set_xlim((0.45, 0.55))
ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
fig = plt.figure()
t = arange(65536)
ax = fig.add_subplot(111)
p1 = ax.plot(abs(fft(sin(2*pi*.01*t)*blackman(len(t)))))
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 20
@cleanup
def test_start_with_moveto():
# Should be entirely clipped away to a single MOVETO
data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
import base64
if hasattr(base64, 'encodebytes'):
# Python 3 case
decodebytes = base64.decodebytes
else:
# Python 2 case
decodebytes = base64.decodestring
verts = np.fromstring(decodebytes(data), dtype='<i4')
verts = verts.reshape((len(verts) / 2, 2))
path = Path(verts)
segs = path.iter_segments(transforms.IdentityTransform(), clip=(0.0, 0.0, 100.0, 100.0))
segs = list(segs)
assert len(segs) == 1
assert segs[0][1] == Path.MOVETO
@cleanup
@raises(OverflowError)
def test_throw_rendering_complexity_exceeded():
rcParams['path.simplify'] = False
xx = np.arange(200000)
yy = np.random.rand(200000)
yy[1000] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xx, yy)
try:
fig.savefig(io.BytesIO())
finally:
rcParams['path.simplify'] = True
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
fig = plt.figure(figsize=(2, 1))
fig.subplots_adjust(left = 0, bottom = 0, wspace = 0, hspace = 0)
ax = fig.add_axes((0, 0, 1.0, 1.0), ylim = (0, 5), autoscale_on = False)
ax.plot(dat)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x + 1, y + 1)
ax.plot(x + 1, y + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
x = np.linspace(0, 3.14 * 2, 3000)
y = np.sin(x)
x[::100] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_ylim(-0.25, 0.25)
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| mit |
epfl-lts2/pygsp | pygsp/graphs/ring.py | 1 | 2870 | # -*- coding: utf-8 -*-
import numpy as np
from scipy import sparse
from . import Graph # prevent circular import in Python < 3.5
class Ring(Graph):
r"""K-regular ring graph.
A signal on the ring graph is akin to a 1-dimensional periodic signal in
classical signal processing.
On the ring graph, the graph Fourier transform (GFT) is the classical
discrete Fourier transform (DFT_).
Actually, the Laplacian of the ring graph is a `circulant matrix`_, and any
circulant matrix is diagonalized by the DFT.
.. _DFT: https://en.wikipedia.org/wiki/Discrete_Fourier_transform
.. _circulant matrix: https://en.wikipedia.org/wiki/Circulant_matrix
Parameters
----------
N : int
Number of vertices.
k : int
Number of neighbors in each direction.
See Also
--------
Path : 1D line with even boundary conditions
Torus : Kronecker product of two ring graphs
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.Ring(N=10)
>>> fig, axes = plt.subplots(1, 2)
>>> _ = axes[0].spy(G.W)
>>> _ = G.plot(ax=axes[1])
The GFT of the ring graph is the classical DFT.
>>> from matplotlib import pyplot as plt
>>> n_eigenvectors = 4
>>> graph = graphs.Ring(30)
>>> fig, axes = plt.subplots(1, 2)
>>> graph.set_coordinates('line1D')
>>> graph.compute_fourier_basis()
>>> _ = graph.plot(graph.U[:, :n_eigenvectors], ax=axes[0])
>>> _ = axes[0].legend(range(n_eigenvectors))
>>> _ = axes[1].plot(graph.e, '.')
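A quick numerical check (added as an illustration; it assumes the default
combinatorial Laplacian): for ``k=1`` the Laplacian is circulant with first
row ``[2, -1, 0, ..., 0, -1]``, so its eigenvalues are 2 - 2*cos(2*pi*m/N).
>>> import numpy as np
>>> G = graphs.Ring(N=16)
>>> G.compute_fourier_basis()
>>> np.allclose(np.sort(G.e), np.sort(2 - 2*np.cos(2*np.pi*np.arange(16)/16)))
True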
"""
def __init__(self, N=64, k=1, **kwargs):
self.k = k
if N < 3:
# N=2 is disallowed: the two vertices would be connected by two parallel
# edges (one in each direction around the ring), which a simple symmetric
# weight matrix cannot represent.
raise ValueError('There should be at least 3 vertices.')
if 2*k > N:
raise ValueError('Too many neighbors requested.')
if 2*k == N:
num_edges = N * (k - 1) + k
else:
num_edges = N * k
i_inds = np.zeros((2 * num_edges))
j_inds = np.zeros((2 * num_edges))
tmpN = np.arange(N, dtype=int)
for i in range(min(k, (N - 1) // 2)):
i_inds[2*i * N + tmpN] = tmpN
j_inds[2*i * N + tmpN] = np.remainder(tmpN + i + 1, N)
i_inds[(2*i + 1)*N + tmpN] = np.remainder(tmpN + i + 1, N)
j_inds[(2*i + 1)*N + tmpN] = tmpN
if 2*k == N:
i_inds[2*N*(k - 1) + tmpN] = tmpN
j_inds[2*N*(k - 1) + tmpN] = np.remainder(tmpN + k, N)
W = sparse.csc_matrix((np.ones((2*num_edges)), (i_inds, j_inds)),
shape=(N, N))
plotting = {'limits': np.array([-1, 1, -1, 1])}
super(Ring, self).__init__(W, plotting=plotting, **kwargs)
self.set_coordinates('ring2D')
def _get_extra_repr(self):
return dict(k=self.k)
| bsd-3-clause |
RedPointyJackson/tfg | study_cases/dieharder/plot.py | 1 | 1126 | #!/usr/bin/env python3
def cm2inch(value):
return value/2.54
import matplotlib.pyplot as plt
import pandas as pd
import seaborn.apionly as sns
plt.style.use('custom')
failcolor = '#C44E52'
passcolor = '#55A868'
warncolor = '#FFA574'
df = pd.read_csv('data.csv')
fig = plt.figure(figsize=(cm2inch(15),cm2inch(6)))
ax = sns.stripplot(x='generator', y='value', data=df, jitter=True)
ax.set_xlabel('Generador')
ax.set_ylabel('p values')
fig.savefig('summary.pdf')
# for gen,gendf in df.groupby('generator'):
# fig = plt.figure(figsize=(cm2inch(2),cm2inch(2)))
# ax = fig.add_subplot(1,1,1)
# ax.set_ylabel('')
# ax.set_xlabel('')
# L = len(gendf['value'])
# vals = list(gendf['value'])
# for j in range(L):
# val = vals[j]
# color = passcolor
# marker = 'o'
# ps = 2
# if val < 0.01: color=failcolor; marker='x'; ps=5
# if val > 0.997: color=warncolor; marker='x'; ps=5
# ax.plot(j, val, marker, ms=ps, color=color)
# ax.set_xticks([])
# ax.set_yticks([0,0.5,1])
# ax.set_yticklabels([])
# fig.savefig('%s.pdf' % gen)
| mit |
droundy/deft | papers/water-saft/figs/density-compare.py | 1 | 1705 | #!/usr/bin/env python
#need this to run without xserver
import matplotlib
matplotlib.use('Agg')
import math
import matplotlib.pyplot as pyplot
import numpy
import pylab
from matplotlib.patches import Ellipse
nm = 18.8972613
gpermL=4.9388942e-3/0.996782051315 # conversion from atomic units to mass density
grey = '#999999'
blueish = '#99cccc'#'#aadddd' #'#55dae0'
rod = '#666666'
hugdata = pylab.loadtxt('figs/hughes-single-rod-1nm-density.dat')
rhug = hugdata[:, 0]/nm
hugdensity = hugdata[:, 1]/gpermL
p1, = pylab.plot(rhug, hugdensity, color = '#3333aa', linestyle='--')
newdata = pylab.loadtxt('figs/single-rod-1nm-density.dat')
rnew = newdata[:, 0]/nm
newdensity = newdata[:, 1]/gpermL
p2, = pylab.plot(rnew, newdensity, color = '#dd6677', linestyle='-')
pyplot.hlines(1, 0, 1.3, 'black', ':')
circleheight = 0.25
ymax = 3.1
rmax = 1.2
hardsphere_diameter = 3.0342/10 # nm
rod_radius = 0.25 # nm
pyplot.vlines([rod_radius - hardsphere_diameter/2], 0, ymax, rod, '-')
xpoints = [rod_radius + n*hardsphere_diameter for n in range(4)]
ypoints = [circleheight]*4
pyplot.plot(xpoints, ypoints, marker = 'o', color = 'black', linestyle = '')
fig = pyplot.gcf()
for n in range(4):
xpos = rod_radius + n*hardsphere_diameter
pyplot.vlines(xpos, 0, ymax, grey, ':')
fig.gca().add_artist(Ellipse((xpos, circleheight),
hardsphere_diameter, 1.2*hardsphere_diameter*ymax/rmax,
color = blueish, fill=False))
#plot properties
pyplot.ylabel('Density (g/mL)')
pyplot.xlabel('Radius (nm)')
pyplot.ylim(0, ymax)
pyplot.xlim(0, rmax)
pyplot.legend([p1, p2], ["Hughes, et al", "This work"])
pyplot.savefig('figs/density-compare.pdf')
| gpl-2.0 |
JohnGriffiths/nipype | nipype/algorithms/misc.py | 9 | 51269 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Miscellaneous algorithms
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
'''
import os
import os.path as op
import nibabel as nb
import numpy as np
from math import floor, ceil
from scipy.ndimage.morphology import grey_dilation
from scipy.ndimage.morphology import binary_erosion
from scipy.spatial.distance import cdist, euclidean, dice, jaccard
from scipy.ndimage.measurements import center_of_mass, label
from scipy.special import legendre
import scipy.io as sio
import itertools
import scipy.stats as stats
from nipype import logging
import warnings
import metrics as nam
from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec, isdefined,
DynamicTraitedSpec, Undefined)
from nipype.utils.filemanip import fname_presuffix, split_filename
iflogger = logging.getLogger('interface')
class PickAtlasInputSpec(BaseInterfaceInputSpec):
atlas = File(exists=True, desc="Location of the atlas that will be used.",
mandatory=True)
labels = traits.Either(
traits.Int, traits.List(traits.Int),
desc=("Labels of regions that will be included in the mask. Must be\
compatible with the atlas used."),
mandatory=True
)
hemi = traits.Enum(
'both', 'left', 'right',
desc="Restrict the mask to only one hemisphere: left or right",
usedefault=True
)
dilation_size = traits.Int(
usedefault=True,
desc="Defines how much the mask will be dilated (expanded in 3D)."
)
output_file = File(desc="Where to store the output mask.")
class PickAtlasOutputSpec(TraitedSpec):
mask_file = File(exists=True, desc="output mask file")
class PickAtlas(BaseInterface):
"""Returns ROI masks given an atlas and a list of labels. Supports dilation
and left right masking (assuming the atlas is properly aligned).
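Example
-------
A minimal usage sketch (added; the atlas path and label values below are
placeholders):
>>> from nipype.algorithms import misc
>>> pick = misc.PickAtlas()
>>> pick.inputs.atlas = 'atlas.nii'
>>> pick.inputs.labels = [17, 53]
>>> pick.inputs.hemi = 'left'
>>> pick.run() # doctest: +SKIP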
"""
input_spec = PickAtlasInputSpec
output_spec = PickAtlasOutputSpec
def _run_interface(self, runtime):
nim = self._get_brodmann_area()
nb.save(nim, self._gen_output_filename())
return runtime
def _gen_output_filename(self):
if not isdefined(self.inputs.output_file):
output = fname_presuffix(fname=self.inputs.atlas, suffix="_mask",
newpath=os.getcwd(), use_ext=True)
else:
output = os.path.realpath(self.inputs.output_file)
return output
def _get_brodmann_area(self):
nii = nb.load(self.inputs.atlas)
origdata = nii.get_data()
newdata = np.zeros(origdata.shape)
if not isinstance(self.inputs.labels, list):
labels = [self.inputs.labels]
else:
labels = self.inputs.labels
for lab in labels:
newdata[origdata == lab] = 1
if self.inputs.hemi == 'right':
newdata[floor(float(origdata.shape[0]) / 2):, :, :] = 0
elif self.inputs.hemi == 'left':
newdata[:ceil(float(origdata.shape[0]) / 2), :, :] = 0
if self.inputs.dilation_size != 0:
newdata = grey_dilation(
newdata, (2 * self.inputs.dilation_size + 1,
2 * self.inputs.dilation_size +
1,
2 * self.inputs.dilation_size + 1))
return nb.Nifti1Image(newdata, nii.get_affine(), nii.get_header())
def _list_outputs(self):
outputs = self._outputs().get()
outputs['mask_file'] = self._gen_output_filename()
return outputs
class SimpleThresholdInputSpec(BaseInterfaceInputSpec):
volumes = InputMultiPath(
File(exists=True), desc='volumes to be thresholded', mandatory=True)
threshold = traits.Float(
desc='everything below this value will be set to zero',
mandatory=True
)
class SimpleThresholdOutputSpec(TraitedSpec):
thresholded_volumes = OutputMultiPath(
File(exists=True), desc="thresholded volumes")
class SimpleThreshold(BaseInterface):
"""Applies a threshold to input volumes
"""
input_spec = SimpleThresholdInputSpec
output_spec = SimpleThresholdOutputSpec
def _run_interface(self, runtime):
for fname in self.inputs.volumes:
img = nb.load(fname)
data = np.array(img.get_data())
active_map = data > self.inputs.threshold
thresholded_map = np.zeros(data.shape)
thresholded_map[active_map] = data[active_map]
new_img = nb.Nifti1Image(
thresholded_map, img.get_affine(), img.get_header())
_, base, _ = split_filename(fname)
nb.save(new_img, base + '_thresholded.nii')
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["thresholded_volumes"] = []
for fname in self.inputs.volumes:
_, base, _ = split_filename(fname)
outputs["thresholded_volumes"].append(
os.path.abspath(base + '_thresholded.nii'))
return outputs
class ModifyAffineInputSpec(BaseInterfaceInputSpec):
volumes = InputMultiPath(
File(exists=True),
desc='volumes which affine matrices will be modified',
mandatory=True
)
transformation_matrix = traits.Array(
value=np.eye(4),
shape=(4, 4),
desc="transformation matrix that will be left multiplied by the\
affine matrix",
usedefault=True
)
class ModifyAffineOutputSpec(TraitedSpec):
transformed_volumes = OutputMultiPath(File(exist=True))
class ModifyAffine(BaseInterface):
Left multiplies the affine matrix with a specified matrix. Saves the volume
as a nifti file.
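Example
-------
A minimal usage sketch (added; the matrix below flips the x axis and the
file name is a placeholder):
>>> import numpy as np
>>> from nipype.algorithms import misc
>>> mod = misc.ModifyAffine()
>>> mod.inputs.volumes = ['functional.nii']
>>> mod.inputs.transformation_matrix = np.diag([-1, 1, 1, 1])
>>> mod.run() # doctest: +SKIP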
"""
input_spec = ModifyAffineInputSpec
output_spec = ModifyAffineOutputSpec
def _gen_output_filename(self, name):
_, base, _ = split_filename(name)
return os.path.abspath(base + "_transformed.nii")
def _run_interface(self, runtime):
for fname in self.inputs.volumes:
img = nb.load(fname)
affine = img.get_affine()
affine = np.dot(self.inputs.transformation_matrix, affine)
nb.save(nb.Nifti1Image(img.get_data(), affine,
img.get_header()), self._gen_output_filename(fname))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['transformed_volumes'] = []
for fname in self.inputs.volumes:
outputs['transformed_volumes'].append(
self._gen_output_filename(fname))
return outputs
class CreateNiftiInputSpec(BaseInterfaceInputSpec):
data_file = File(exists=True, mandatory=True, desc="ANALYZE img file")
header_file = File(
exists=True, mandatory=True, desc="corresponding ANALYZE hdr file")
affine = traits.Array(desc="affine transformation array")
class CreateNiftiOutputSpec(TraitedSpec):
nifti_file = File(exists=True)
class CreateNifti(BaseInterface):
"""Creates a nifti volume
"""
input_spec = CreateNiftiInputSpec
output_spec = CreateNiftiOutputSpec
def _gen_output_file_name(self):
_, base, _ = split_filename(self.inputs.data_file)
return os.path.abspath(base + ".nii")
def _run_interface(self, runtime):
hdr = nb.AnalyzeHeader.from_fileobj(
open(self.inputs.header_file, 'rb'))
if isdefined(self.inputs.affine):
affine = self.inputs.affine
else:
affine = None
data = hdr.data_from_fileobj(open(self.inputs.data_file, 'rb'))
img = nb.Nifti1Image(data, affine, hdr)
nb.save(img, self._gen_output_file_name())
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['nifti_file'] = self._gen_output_file_name()
return outputs
class TSNRInputSpec(BaseInterfaceInputSpec):
in_file = InputMultiPath(File(exists=True), mandatory=True,
desc='realigned 4D file or a list of 3D files')
regress_poly = traits.Range(low=1, desc='Remove polynomials')
class TSNROutputSpec(TraitedSpec):
tsnr_file = File(exists=True, desc='tsnr image file')
mean_file = File(exists=True, desc='mean image file')
stddev_file = File(exists=True, desc='std dev image file')
detrended_file = File(desc='detrended input file')
class TSNR(BaseInterface):
"""Computes the time-course SNR for a time series
Typically you want to run this on a realigned time-series.
Example
-------
>>> tsnr = TSNR()
>>> tsnr.inputs.in_file = 'functional.nii'
>>> res = tsnr.run() # doctest: +SKIP
"""
input_spec = TSNRInputSpec
output_spec = TSNROutputSpec
def _gen_output_file_name(self, suffix=None):
_, base, ext = split_filename(self.inputs.in_file[0])
if suffix in ['mean', 'stddev']:
return os.path.abspath(base + "_tsnr_" + suffix + ext)
elif suffix in ['detrended']:
return os.path.abspath(base + "_" + suffix + ext)
else:
return os.path.abspath(base + "_tsnr" + ext)
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file[0])
header = img.get_header().copy()
vollist = [nb.load(filename) for filename in self.inputs.in_file]
data = np.concatenate([vol.get_data().reshape(
vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
if data.dtype.kind == 'i':
header.set_data_dtype(np.float32)
data = data.astype(np.float32)
if isdefined(self.inputs.regress_poly):
timepoints = img.get_shape()[-1]
X = np.ones((timepoints, 1))
for i in range(self.inputs.regress_poly):
X = np.hstack((X, legendre(
i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
datahat = np.rollaxis(np.dot(X[:, 1:],
np.rollaxis(
betas[1:, :, :, :], 0, 3)),
0, 4)
data = data - datahat
img = nb.Nifti1Image(data, img.get_affine(), header)
nb.save(img, self._gen_output_file_name('detrended'))
meanimg = np.mean(data, axis=3)
stddevimg = np.std(data, axis=3)
tsnr = meanimg / stddevimg
img = nb.Nifti1Image(tsnr, img.get_affine(), header)
nb.save(img, self._gen_output_file_name())
img = nb.Nifti1Image(meanimg, img.get_affine(), header)
nb.save(img, self._gen_output_file_name('mean'))
img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
nb.save(img, self._gen_output_file_name('stddev'))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['tsnr_file'] = self._gen_output_file_name()
outputs['mean_file'] = self._gen_output_file_name('mean')
outputs['stddev_file'] = self._gen_output_file_name('stddev')
if isdefined(self.inputs.regress_poly):
outputs['detrended_file'] = self._gen_output_file_name('detrended')
return outputs
class GunzipInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True)
class GunzipOutputSpec(TraitedSpec):
out_file = File(exists=True)
class Gunzip(BaseInterface):
"""Gunzip wrapper
"""
input_spec = GunzipInputSpec
output_spec = GunzipOutputSpec
def _gen_output_file_name(self):
_, base, ext = split_filename(self.inputs.in_file)
if ext[-3:].lower() == ".gz":
ext = ext[:-3]
return os.path.abspath(base + ext)
def _run_interface(self, runtime):
import gzip
in_file = gzip.open(self.inputs.in_file, 'rb')
out_file = open(self._gen_output_file_name(), 'wb')
out_file.write(in_file.read())
out_file.close()
in_file.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = self._gen_output_file_name()
return outputs
def replaceext(in_list, ext):
out_list = list()
for filename in in_list:
path, name, _ = split_filename(op.abspath(filename))
out_name = op.join(path, name) + ext
out_list.append(out_name)
return out_list
def matlab2csv(in_array, name, reshape):
output_array = np.asarray(in_array)
if reshape:
if len(np.shape(output_array)) > 1:
output_array = np.reshape(output_array, (
np.shape(output_array)[0]*np.shape(output_array)[1], 1))
iflogger.info(np.shape(output_array))
output_name = op.abspath(name + '.csv')
np.savetxt(output_name, output_array, delimiter=',')
return output_name
class Matlab2CSVInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file')
reshape_matrix = traits.Bool(
True, usedefault=True,
desc='The output of this interface is meant for R, so matrices will be\
reshaped to vectors by default.'
)
class Matlab2CSVOutputSpec(TraitedSpec):
csv_files = OutputMultiPath(
File(desc='Output CSV files for each variable saved in the input .mat\
file')
)
class Matlab2CSV(BaseInterface):
"""Simple interface to save the components of a MATLAB .mat file as a text
file with comma-separated values (CSVs).
CSV files are easily loaded in R, for use in statistical processing.
For further information, see cran.r-project.org/doc/manuals/R-data.pdf
Example
-------
>>> from nipype.algorithms import misc
>>> mat2csv = misc.Matlab2CSV()
>>> mat2csv.inputs.in_file = 'cmatrix.mat'
>>> mat2csv.run() # doctest: +SKIP
"""
input_spec = Matlab2CSVInputSpec
output_spec = Matlab2CSVOutputSpec
def _run_interface(self, runtime):
in_dict = sio.loadmat(op.abspath(self.inputs.in_file))
# Check if the file has multiple variables in it. If it does, loop
# through them and save them as individual CSV files.
# If not, save the variable as a single CSV file using the input file
# name and a .csv extension.
saved_variables = list()
for key in in_dict.keys():
if not key.startswith('__'):
if isinstance(in_dict[key][0], np.ndarray):
saved_variables.append(key)
else:
iflogger.info('One of the keys in the input file, {k}, is not a Numpy array'.format(k=key))
if len(saved_variables) > 1:
iflogger.info(
'{N} variables found:'.format(N=len(saved_variables)))
iflogger.info(saved_variables)
for variable in saved_variables:
iflogger.info(
'...Converting {var} - type {ty} - to\
CSV'.format(var=variable, ty=type(in_dict[variable]))
)
matlab2csv(
in_dict[variable], variable, self.inputs.reshape_matrix)
elif len(saved_variables) == 1:
_, name, _ = split_filename(self.inputs.in_file)
variable = saved_variables[0]
iflogger.info('Single variable found {var}, type {ty}:'.format(
var=variable, ty=type(in_dict[variable])))
iflogger.info('...Converting {var} to CSV from {f}'.format(
var=variable, f=self.inputs.in_file))
matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix)
else:
iflogger.error('No values in the MATLAB file?!')
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
in_dict = sio.loadmat(op.abspath(self.inputs.in_file))
saved_variables = list()
for key in in_dict.keys():
if not key.startswith('__'):
if isinstance(in_dict[key][0], np.ndarray):
saved_variables.append(key)
else:
iflogger.error('One of the keys in the input file, {k}, is\
not a Numpy array'.format(k=key))
if len(saved_variables) > 1:
outputs['csv_files'] = replaceext(saved_variables, '.csv')
elif len(saved_variables) == 1:
_, name, ext = split_filename(self.inputs.in_file)
outputs['csv_files'] = op.abspath(name + '.csv')
else:
iflogger.error('No values in the MATLAB file?!')
return outputs
def merge_csvs(in_list):
for idx, in_file in enumerate(in_list):
try:
in_array = np.loadtxt(in_file, delimiter=',')
except ValueError, ex:
try:
in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
except ValueError, ex:
first = open(in_file, 'r')
header_line = first.readline()
header_list = header_line.split(',')
n_cols = len(header_list)
try:
in_array = np.loadtxt(
in_file, delimiter=',', skiprows=1,
usecols=range(1, n_cols)
)
except ValueError, ex:
in_array = np.loadtxt(
in_file, delimiter=',', skiprows=1, usecols=range(1, n_cols-1))
if idx == 0:
out_array = in_array
else:
out_array = np.dstack((out_array, in_array))
out_array = np.squeeze(out_array)
iflogger.info('Final output array shape:')
iflogger.info(np.shape(out_array))
return out_array
def remove_identical_paths(in_files):
import os.path as op
from nipype.utils.filemanip import split_filename
if len(in_files) > 1:
out_names = list()
commonprefix = op.commonprefix(in_files)
lastslash = commonprefix.rfind('/')
commonpath = commonprefix[0:(lastslash+1)]
for fileidx, in_file in enumerate(in_files):
path, name, ext = split_filename(in_file)
in_file = op.join(path, name)
name = in_file.replace(commonpath, '')
name = name.replace('_subject_id_', '')
out_names.append(name)
else:
path, name, ext = split_filename(in_files[0])
out_names = [name]
return out_names
def maketypelist(rowheadings, shape, extraheadingBool, extraheading):
typelist = []
if rowheadings:
typelist.append(('heading', 'a40'))
if len(shape) > 1:
for idx in range(1, (min(shape)+1)):
typelist.append((str(idx), float))
else:
for idx in range(1, (shape[0]+1)):
typelist.append((str(idx), float))
if extraheadingBool:
typelist.append((extraheading, 'a40'))
iflogger.info(typelist)
return typelist
def makefmtlist(output_array, typelist, rowheadingsBool,
shape, extraheadingBool):
fmtlist = []
if rowheadingsBool:
fmtlist.append('%s')
if len(shape) > 1:
output = np.zeros(max(shape), typelist)
for idx in range(1, min(shape)+1):
output[str(idx)] = output_array[:, idx-1]
fmtlist.append('%f')
else:
output = np.zeros(1, typelist)
for idx in range(1, len(output_array)+1):
output[str(idx)] = output_array[idx-1]
fmtlist.append('%f')
if extraheadingBool:
fmtlist.append('%s')
fmt = ','.join(fmtlist)
return fmt, output
class MergeCSVFilesInputSpec(TraitedSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True,
desc='Input comma-separated value (CSV) files')
out_file = File('merged.csv', usedefault=True,
desc='Output filename for merged CSV file')
column_headings = traits.List(
traits.Str, desc='List of column headings to save in merged CSV file\
(must be equal to number of input files). If left undefined, these\
will be pulled from the input filenames.')
row_headings = traits.List(
traits.Str, desc='List of row headings to save in merged CSV file\
(must be equal to number of rows in the input files).')
row_heading_title = traits.Str(
'label', usedefault=True, desc='Column heading for the row headings\
added')
extra_column_heading = traits.Str(
desc='New heading to add for the added field.')
extra_field = traits.Str(
desc='New field to add to each row. This is useful for saving the\
group or subject ID in the file.')
class MergeCSVFilesOutputSpec(TraitedSpec):
csv_file = File(desc='Output CSV file containing columns ')
class MergeCSVFiles(BaseInterface):
"""This interface is designed to facilitate data loading in the R environment.
It takes input CSV files and merges them into a single CSV file.
If provided, it will also incorporate column heading names into the
resulting CSV file.
CSV files are easily loaded in R, for use in statistical processing.
For further information, see cran.r-project.org/doc/manuals/R-data.pdf
Example
-------
>>> from nipype.algorithms import misc
>>> mat2csv = misc.MergeCSVFiles()
>>> mat2csv.inputs.in_files = ['degree.mat','clustering.mat']
>>> mat2csv.inputs.column_headings = ['degree','clustering']
>>> mat2csv.run() # doctest: +SKIP
"""
input_spec = MergeCSVFilesInputSpec
output_spec = MergeCSVFilesOutputSpec
def _run_interface(self, runtime):
extraheadingBool = False
extraheading = ''
rowheadingsBool = False
"""
This block defines the column headings.
"""
if isdefined(self.inputs.column_headings):
iflogger.info('Column headings have been provided:')
headings = self.inputs.column_headings
else:
iflogger.info(
'Column headings not provided! Pulled from input filenames:')
headings = remove_identical_paths(self.inputs.in_files)
if isdefined(self.inputs.extra_field):
if isdefined(self.inputs.extra_column_heading):
extraheading = self.inputs.extra_column_heading
iflogger.info('Extra column heading provided: {col}'.format(
col=extraheading))
else:
extraheading = 'type'
iflogger.info(
'Extra column heading was not defined. Using "type"')
headings.append(extraheading)
extraheadingBool = True
if len(self.inputs.in_files) == 1:
iflogger.warn('Only one file input!')
if isdefined(self.inputs.row_headings):
iflogger.info('Row headings have been provided. Adding "labels"\
column header.')
prefix = '"{p}","'.format(p=self.inputs.row_heading_title)
csv_headings = prefix + '","'.join(itertools.chain(
headings)) + '"\n'
rowheadingsBool = True
else:
iflogger.info('Row headings have not been provided.')
csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n'
iflogger.info('Final Headings:')
iflogger.info(csv_headings)
"""
Next we merge the arrays and define the output text file
"""
output_array = merge_csvs(self.inputs.in_files)
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.csv':
ext = '.csv'
out_file = op.abspath(name + ext)
file_handle = open(out_file, 'w')
file_handle.write(csv_headings)
shape = np.shape(output_array)
typelist = maketypelist(
rowheadingsBool, shape, extraheadingBool, extraheading)
fmt, output = makefmtlist(
output_array, typelist, rowheadingsBool, shape, extraheadingBool)
if rowheadingsBool:
row_heading_list = self.inputs.row_headings
row_heading_list_with_quotes = []
for row_heading in row_heading_list:
row_heading_with_quotes = '"' + row_heading + '"'
row_heading_list_with_quotes.append(row_heading_with_quotes)
row_headings = np.array(row_heading_list_with_quotes, dtype='|S40')
output['heading'] = row_headings
if isdefined(self.inputs.extra_field):
extrafieldlist = []
if len(shape) > 1:
mx = shape[0]
else:
mx = 1
for idx in range(0, mx):
extrafieldlist.append(self.inputs.extra_field)
iflogger.info(len(extrafieldlist))
output[extraheading] = extrafieldlist
iflogger.info(output)
iflogger.info(fmt)
np.savetxt(file_handle, output, fmt, delimiter=',')
file_handle.close()
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.csv':
ext = '.csv'
out_file = op.abspath(name + ext)
outputs['csv_file'] = out_file
return outputs
class AddCSVColumnInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True,
desc='Input comma-separated value (CSV) files')
out_file = File('extra_heading.csv', usedefault=True,
desc='Output filename for merged CSV file')
extra_column_heading = traits.Str(
desc='New heading to add for the added field.')
extra_field = traits.Str(
desc='New field to add to each row. This is useful for saving the\
group or subject ID in the file.')
class AddCSVColumnOutputSpec(TraitedSpec):
csv_file = File(desc='Output CSV file containing columns ')
class AddCSVColumn(BaseInterface):
"""Short interface to add an extra column and field to a text file
Example
-------
>>> from nipype.algorithms import misc
>>> addcol = misc.AddCSVColumn()
>>> addcol.inputs.in_file = 'degree.csv'
>>> addcol.inputs.extra_column_heading = 'group'
>>> addcol.inputs.extra_field = 'male'
>>> addcol.run() # doctest: +SKIP
"""
input_spec = AddCSVColumnInputSpec
output_spec = AddCSVColumnOutputSpec
def _run_interface(self, runtime):
in_file = open(self.inputs.in_file, 'r')
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.csv':
ext = '.csv'
out_file = op.abspath(name + ext)
out_file = open(out_file, 'w')
firstline = in_file.readline()
firstline = firstline.replace('\n', '')
new_firstline = firstline + ',"' + \
self.inputs.extra_column_heading + '"\n'
out_file.write(new_firstline)
for line in in_file:
new_line = line.replace('\n', '')
new_line = new_line + ',' + self.inputs.extra_field + '\n'
out_file.write(new_line)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.csv':
ext = '.csv'
out_file = op.abspath(name + ext)
outputs['csv_file'] = out_file
return outputs
class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
in_file = traits.File(mandatory=True,
desc='Input comma-separated value (CSV) files')
_outputs = traits.Dict(traits.Any, value={}, usedefault=True)
def __setattr__(self, key, value):
if key not in self.copyable_trait_names():
if not isdefined(value):
super(AddCSVRowInputSpec, self).__setattr__(key, value)
self._outputs[key] = value
else:
if key in self._outputs:
self._outputs[key] = value
super(AddCSVRowInputSpec, self).__setattr__(key, value)
class AddCSVRowOutputSpec(TraitedSpec):
csv_file = File(desc='Output CSV file containing rows ')
class AddCSVRow(BaseInterface):
"""Simple interface to add an extra row to a csv file
.. note:: Requires `pandas <http://pandas.pydata.org/>`_
.. warning:: Multi-platform thread-safe execution is possible with
`lockfile <https://pythonhosted.org/lockfile/lockfile.html>`_. Please
recall that (1) this module is alpha software; and (2) it should be
installed for thread-safe writing.
If lockfile is not installed, then the interface is not thread-safe.
Example
-------
>>> from nipype.algorithms import misc
>>> addrow = misc.AddCSVRow()
>>> addrow.inputs.in_file = 'scores.csv'
>>> addrow.inputs.si = 0.74
>>> addrow.inputs.di = 0.93
>>> addrow.inputs.subject_id = 'S400'
>>> addrow.inputs.list_of_values = [ 0.4, 0.7, 0.3 ]
>>> addrow.run() # doctest: +SKIP
"""
input_spec = AddCSVRowInputSpec
output_spec = AddCSVRowOutputSpec
def __init__(self, infields=None, force_run=True, **kwargs):
super(AddCSVRow, self).__init__(**kwargs)
undefined_traits = {}
self._infields = infields
self._have_lock = False
self._lock = None
if infields:
for key in infields:
self.inputs.add_trait(key, traits.Any)
self.inputs._outputs[key] = Undefined
undefined_traits[key] = Undefined
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
if force_run:
self._always_run = True
def _run_interface(self, runtime):
try:
import pandas as pd
except ImportError:
raise ImportError(('This interface requires pandas '
'(http://pandas.pydata.org/) to run.'))
try:
import lockfile as pl
self._have_lock = True
except ImportError:
from warnings import warn
warn(('Python module lockfile was not found: AddCSVRow will not be'
' thread-safe in multi-processor execution'))
input_dict = {}
for key, val in self.inputs._outputs.items():
# expand lists to several columns
if key == 'trait_added' and val in self.inputs.copyable_trait_names():
continue
if isinstance(val, list):
for i, v in enumerate(val):
input_dict['%s_%d' % (key, i)] = v
else:
input_dict[key] = val
df = pd.DataFrame([input_dict])
if self._have_lock:
self._lock = pl.FileLock(self.inputs.in_file)
# Acquire lock
self._lock.acquire()
if op.exists(self.inputs.in_file):
formerdf = pd.read_csv(self.inputs.in_file, index_col=0)
df = pd.concat([formerdf, df], ignore_index=True)
with open(self.inputs.in_file, 'w') as f:
df.to_csv(f)
if self._have_lock:
self._lock.release()
# Using nipype.external.portalocker this might be something like:
# with pl.Lock(self.inputs.in_file, timeout=1) as fh:
# if op.exists(fh):
# formerdf = pd.read_csv(fh, index_col=0)
# df = pd.concat([formerdf, df], ignore_index=True)
# df.to_csv(fh)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['csv_file'] = self.inputs.in_file
return outputs
def _outputs(self):
return self._add_output_traits(super(AddCSVRow, self)._outputs())
def _add_output_traits(self, base):
return base
class CalculateNormalizedMomentsInputSpec(TraitedSpec):
timeseries_file = File(
exists=True, mandatory=True,
desc='Text file with timeseries in columns and timepoints in rows,\
whitespace separated')
moment = traits.Int(
mandatory=True,
desc="Define which moment should be calculated, 3 for skewness, 4 for\
kurtosis.")
class CalculateNormalizedMomentsOutputSpec(TraitedSpec):
moments = traits.List(traits.Float(), desc='Moments')
class CalculateNormalizedMoments(BaseInterface):
"""Calculates moments of timeseries.
Example
-------
>>> from nipype.algorithms import misc
>>> skew = misc.CalculateNormalizedMoments()
>>> skew.inputs.moment = 3
>>> skew.inputs.timeseries_file = 'timeseries.txt'
>>> skew.run() # doctest: +SKIP
"""
input_spec = CalculateNormalizedMomentsInputSpec
output_spec = CalculateNormalizedMomentsOutputSpec
def _run_interface(self, runtime):
self._moments = calc_moments(
self.inputs.timeseries_file, self.inputs.moment)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['moments'] = self._moments
return outputs
def calc_moments(timeseries_file, moment):
"""Returns nth moment (3 for skewness, 4 for kurtosis) of timeseries
(list of values; one per timeseries).
Keyword arguments:
timeseries_file -- text file with white space separated timepoints in rows
"""
timeseries = np.genfromtxt(timeseries_file)
m2 = stats.moment(timeseries, 2, axis=0)
m3 = stats.moment(timeseries, moment, axis=0)
zero = (m2 == 0)
return np.where(zero, 0, m3 / m2**(moment/2.0))
class AddNoiseInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True,
desc='input image that will be corrupted with noise')
in_mask = File(exists=True, desc=('input mask, voxels outside this mask '
'will be considered background'))
snr = traits.Float(10.0, desc='desired output SNR in dB', usedefault=True)
dist = traits.Enum('normal', 'rician', usedefault=True, mandatory=True,
desc=('desired noise distribution'))
bg_dist = traits.Enum('normal', 'rayleigh', usedefault=True, mandatory=True,
desc=('desired noise distribution for the '
'background (voxels outside the mask)'))
out_file = File(desc='desired output filename')
class AddNoiseOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='corrupted image')
class AddNoise(BaseInterface):
"""
Corrupts the input image with noise
Example
-------
>>> from nipype.algorithms.misc import AddNoise
>>> noise = AddNoise()
>>> noise.inputs.in_file = 'T1.nii'
>>> noise.inputs.in_mask = 'mask.nii'
>>> noise.inputs.snr = 30.0
>>> noise.run() # doctest: +SKIP
"""
input_spec = AddNoiseInputSpec
output_spec = AddNoiseOutputSpec
def _run_interface(self, runtime):
in_image = nb.load(self.inputs.in_file)
in_data = in_image.get_data()
snr = self.inputs.snr
if isdefined(self.inputs.in_mask):
in_mask = nb.load(self.inputs.in_mask).get_data()
else:
in_mask = np.ones_like(in_data)
result = self.gen_noise(in_data, mask=in_mask, snr_db=snr,
dist=self.inputs.dist, bg_dist=self.inputs.bg_dist)
res_im = nb.Nifti1Image(result, in_image.get_affine(), in_image.get_header())
res_im.to_filename(self._gen_output_filename())
return runtime
def _gen_output_filename(self):
if not isdefined(self.inputs.out_file):
_, base, ext = split_filename(self.inputs.in_file)
out_file = os.path.abspath('%s_SNR%03.2f%s' % (base, self.inputs.snr, ext))
else:
out_file = self.inputs.out_file
return out_file
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self._gen_output_filename()
return outputs
def gen_noise(self, image, mask=None, snr_db=10.0, dist='normal', bg_dist='normal'):
"""
Generates a copy of an image corrupted with a certain amount of
additive Gaussian noise (optionally Rayleigh-distributed noise in the
background, i.e. outside the mask)
"""
from math import sqrt
snr = sqrt(np.power(10.0, snr_db/10.0))
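# sqrt(10**(snr_db / 10)) == 10**(snr_db / 20), i.e. the amplitude ratio
# corresponding to the requested SNR in dB; it is used below to derive the
# noise standard deviation from the signal statistics.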
if mask is None:
mask = np.ones_like(image)
else:
mask[mask > 0] = 1
mask[mask < 1] = 0
if mask.ndim < image.ndim:
mask = np.rollaxis(np.array([mask]*image.shape[3]), 0, 4)
signal = image[mask > 0].reshape(-1)
if dist == 'normal':
signal = signal - signal.mean()
sigma_n = sqrt(signal.var()/snr)
noise = np.random.normal(size=image.shape, scale=sigma_n)
if (np.any(mask == 0)) and (bg_dist == 'rayleigh'):
bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n)
noise[mask == 0] = bg_noise[mask == 0]
im_noise = image + noise
elif dist == 'rician':
sigma_n = signal.mean()/snr
n_1 = np.random.normal(size=image.shape, scale=sigma_n)
n_2 = np.random.normal(size=image.shape, scale=sigma_n)
stde_1 = n_1/sqrt(2.0)
stde_2 = n_2/sqrt(2.0)
im_noise = np.sqrt((image + stde_1)**2 + (stde_2)**2)
else:
raise NotImplementedError(('Only normal and rician distributions '
'are supported'))
return im_noise
class NormalizeProbabilityMapSetInputSpec(TraitedSpec):
in_files = InputMultiPath(File(exists=True, mandatory=True,
desc='The tpms to be normalized'))
in_mask = File(exists=True,
desc='Masked voxels must sum up to 1.0, 0.0 otherwise.')
class NormalizeProbabilityMapSetOutputSpec(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc="normalized maps")
class NormalizeProbabilityMapSet(BaseInterface):
""" Returns the input tissue probability maps (tpms, aka volume fractions)
normalized to sum up to 1.0 at each voxel within the mask.
.. note:: Please recall this is not a spatial normalization algorithm
Example
-------
>>> from nipype.algorithms import misc
>>> normalize = misc.NormalizeProbabilityMapSet()
>>> normalize.inputs.in_files = [ 'tpm_00.nii.gz', 'tpm_01.nii.gz', \
'tpm_02.nii.gz' ]
>>> normalize.inputs.in_mask = 'tpms_msk.nii.gz'
>>> normalize.run() # doctest: +SKIP
"""
input_spec = NormalizeProbabilityMapSetInputSpec
output_spec = NormalizeProbabilityMapSetOutputSpec
def _run_interface(self, runtime):
mask = None
if isdefined(self.inputs.in_mask):
mask = self.inputs.in_mask
self._out_filenames = normalize_tpms(self.inputs.in_files, mask)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_files'] = self._out_filenames
return outputs
class SplitROIsInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True,
desc='file to be split')
in_mask = File(exists=True, desc='only process files inside mask')
roi_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
desc='desired ROI size')
class SplitROIsOutputSpec(TraitedSpec):
out_files = OutputMultiPath(File(exists=True),
desc='the resulting ROIs')
out_masks = OutputMultiPath(File(exists=True),
desc='a mask indicating valid values')
out_index = OutputMultiPath(File(exists=True),
desc='arrays keeping original locations')
class SplitROIs(BaseInterface):
"""
Splits a 3D image in small chunks to enable parallel processing.
ROIs keep time series structure in 4D images.
Example
-------
>>> from nipype.algorithms import misc
>>> rois = misc.SplitROIs()
>>> rois.inputs.in_file = 'diffusion.nii'
>>> rois.inputs.in_mask = 'mask.nii'
>>> rois.run() # doctest: +SKIP
"""
input_spec = SplitROIsInputSpec
output_spec = SplitROIsOutputSpec
def _run_interface(self, runtime):
mask = None
roisize = None
self._outnames = {}
if isdefined(self.inputs.in_mask):
mask = self.inputs.in_mask
if isdefined(self.inputs.roi_size):
roisize = self.inputs.roi_size
res = split_rois(self.inputs.in_file,
mask, roisize)
self._outnames['out_files'] = res[0]
self._outnames['out_masks'] = res[1]
self._outnames['out_index'] = res[2]
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
for k, v in self._outnames.iteritems():
outputs[k] = v
return outputs
class MergeROIsInputSpec(TraitedSpec):
in_files = InputMultiPath(File(exists=True, mandatory=True,
desc='files to be re-merged'))
in_index = InputMultiPath(File(exists=True, mandatory=True),
desc='array keeping original locations')
in_reference = File(exists=True, desc='reference file')
class MergeROIsOutputSpec(TraitedSpec):
merged_file = File(exists=True, desc='the recomposed file')
class MergeROIs(BaseInterface):
"""
Recomposes the small image chunks generated by SplitROIs into a single image.
ROIs keep time series structure in 4D images.
Example
-------
>>> from nipype.algorithms import misc
>>> rois = misc.MergeROIs()
>>> rois.inputs.in_files = ['roi%02d.nii' % i for i in xrange(1, 6)]
>>> rois.inputs.in_reference = 'mask.nii'
>>> rois.inputs.in_index = ['roi%02d_idx.npz' % i for i in xrange(1, 6)]
>>> rois.run() # doctest: +SKIP
"""
input_spec = MergeROIsInputSpec
output_spec = MergeROIsOutputSpec
def _run_interface(self, runtime):
res = merge_rois(self.inputs.in_files,
self.inputs.in_index,
self.inputs.in_reference)
self._merged = res
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['merged_file'] = self._merged
return outputs
def normalize_tpms(in_files, in_mask=None, out_files=None):
"""
Returns the input tissue probability maps (tpms, aka volume fractions)
normalized to sum up to 1.0 at each voxel within the mask.
"""
import nibabel as nib
import numpy as np
import os.path as op
in_files = np.atleast_1d(in_files).tolist()
if out_files is None:
out_files = []
if len(out_files) != len(in_files):
for i, finname in enumerate(in_files):
fname, fext = op.splitext(op.basename(finname))
if fext == '.gz':
fname, fext2 = op.splitext(fname)
fext = fext2 + fext
out_file = op.abspath('%s_norm_%02d%s' % (fname, i, fext))
out_files += [out_file]
imgs = [nib.load(fim) for fim in in_files]
if len(in_files) == 1:
img_data = imgs[0].get_data()
img_data[img_data > 0.0] = 1.0
hdr = imgs[0].get_header().copy()
hdr['data_type'] = 16
hdr.set_data_dtype(np.float32)
nib.save(nib.Nifti1Image(img_data.astype(np.float32), imgs[0].get_affine(), hdr), out_files[0])
return out_files[0]
img_data = np.array([im.get_data() for im in imgs]).astype(np.float32)
# img_data[img_data > 1.0] = 1.0
img_data[img_data < 0.0] = 0.0
weights = np.sum(img_data, axis=0)
msk = np.ones_like(imgs[0].get_data())
msk[weights <= 0] = 0
if in_mask is not None:
msk = nib.load(in_mask).get_data()
msk[msk <= 0] = 0
msk[msk > 0] = 1
msk = np.ma.masked_equal(msk, 0)
for i, out_file in enumerate(out_files):
data = np.ma.masked_equal(img_data[i], 0)
probmap = data / weights
hdr = imgs[i].get_header().copy()
hdr['data_type'] = 16
hdr.set_data_dtype('float32')
nib.save(nib.Nifti1Image(probmap.astype(np.float32), imgs[i].get_affine(), hdr), out_file)
return out_files
def split_rois(in_file, mask=None, roishape=None):
"""
Splits an image in ROIs for parallel processing
"""
import nibabel as nb
import numpy as np
from math import sqrt, ceil
import os.path as op
if roishape is None:
roishape = (10, 10, 1)
im = nb.load(in_file)
imshape = im.get_shape()
dshape = imshape[:3]
nvols = imshape[-1]
roisize = roishape[0] * roishape[1] * roishape[2]
droishape = (roishape[0], roishape[1], roishape[2], nvols)
if mask is not None:
mask = nb.load(mask).get_data()
mask[mask > 0] = 1
mask[mask < 1] = 0
else:
mask = np.ones(dshape)
mask = mask.reshape(-1).astype(np.uint8)
nzels = np.nonzero(mask)
els = np.sum(mask)
nrois = int(ceil(els / float(roisize)))
data = im.get_data().reshape((mask.size, -1))
data = np.squeeze(data.take(nzels, axis=0))
nvols = data.shape[-1]
roidefname = op.abspath('onesmask.nii.gz')
nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None,
None).to_filename(roidefname)
out_files = []
out_mask = []
out_idxs = []
for i in xrange(nrois):
first = i * roisize
last = (i+1) * roisize
fill = 0
if last > els:
fill = last - els
last = els
droi = data[first:last, ...]
iname = op.abspath('roi%010d_idx' % i)
out_idxs.append(iname+'.npz')
np.savez(iname, (nzels[0][first:last],))
if fill > 0:
droi = np.vstack((droi, np.zeros((fill, nvols), dtype=np.float32)))
partialmsk = np.ones((roisize,), dtype=np.uint8)
partialmsk[-fill:] = 0
partname = op.abspath('partialmask.nii.gz')
nb.Nifti1Image(partialmsk.reshape(roishape), None,
None).to_filename(partname)
out_mask.append(partname)
else:
out_mask.append(roidefname)
fname = op.abspath('roi%010d.nii.gz' % i)
nb.Nifti1Image(droi.reshape(droishape),
None, None).to_filename(fname)
out_files.append(fname)
return out_files, out_mask, out_idxs
def merge_rois(in_files, in_idxs, in_ref,
dtype=None, out_file=None):
"""
Re-builds an image resulting from a parallelized processing
"""
import nibabel as nb
import numpy as np
import os.path as op
import subprocess as sp
if out_file is None:
out_file = op.abspath('merged.nii.gz')
if dtype is None:
dtype = np.float32
# if file is compressed, uncompress using os
# to avoid memory errors
if op.splitext(in_ref)[1] == '.gz':
try:
iflogger.info('uncompress %s' % in_ref)
sp.check_call(['gunzip', in_ref], stdout=sp.PIPE)
in_ref = op.splitext(in_ref)[0]
except:
pass
ref = nb.load(in_ref)
aff = ref.get_affine()
hdr = ref.get_header().copy()
rsh = ref.get_shape()
del ref
npix = rsh[0] * rsh[1] * rsh[2]
fcdata = nb.load(in_files[0]).get_data()
if fcdata.ndim == 4:
ndirs = fcdata.shape[-1]
else:
ndirs = 1
newshape = (rsh[0], rsh[1], rsh[2], ndirs)
hdr.set_data_dtype(dtype)
hdr.set_xyzt_units('mm', 'sec')
if ndirs < 300:
data = np.zeros((npix, ndirs))
for cname, iname in zip(in_files, in_idxs):
f = np.load(iname)
idxs = np.squeeze(f['arr_0'])
cdata = nb.load(cname).get_data().reshape(-1, ndirs)
nels = len(idxs)
idata = (idxs, )
try:
data[idata, ...] = cdata[0:nels, ...]
except:
print(('Consistency between indexes and chunks was '
'lost: data=%s, chunk=%s') % (str(data.shape),
str(cdata.shape)))
raise
hdr.set_data_shape(newshape)
nb.Nifti1Image(data.reshape(newshape).astype(dtype),
aff, hdr).to_filename(out_file)
else:
hdr.set_data_shape(rsh[:3])
nii = []
for d in xrange(ndirs):
fname = op.abspath('vol%06d.nii' % d)
nb.Nifti1Image(np.zeros(rsh[:3]), aff, hdr).to_filename(fname)
nii.append(fname)
for cname, iname in zip(in_files, in_idxs):
f = np.load(iname)
idxs = np.squeeze(f['arr_0'])
for d, fname in enumerate(nii):
data = nb.load(fname).get_data().reshape(-1)
cdata = nb.load(cname).get_data().reshape(-1, ndirs)[:, d]
nels = len(idxs)
idata = (idxs, )
data[idata] = cdata[0:nels]
nb.Nifti1Image(data.reshape(rsh[:3]),
aff, hdr).to_filename(fname)
imgs = [nb.load(im) for im in nii]
allim = nb.concat_images(imgs)
allim.to_filename(out_file)
return out_file
# Deprecated interfaces ------------------------------------------------------
class Distance(nam.Distance):
"""Calculates distance between two volumes.
.. deprecated:: 0.10.0
Use :py:class:`nipype.algorithms.metrics.Distance` instead.
"""
def __init__(self, **inputs):
super(nam.Distance, self).__init__(**inputs)
warnings.warn(("This interface has been deprecated since 0.10.0,"
" please use nipype.algorithms.metrics.Distance"),
DeprecationWarning)
class Overlap(nam.Overlap):
"""Calculates various overlap measures between two maps.
.. deprecated:: 0.10.0
Use :py:class:`nipype.algorithms.metrics.Overlap` instead.
"""
def __init__(self, **inputs):
super(nam.Overlap, self).__init__(**inputs)
warnings.warn(("This interface has been deprecated since 0.10.0,"
" please use nipype.algorithms.metrics.Overlap"),
DeprecationWarning)
class FuzzyOverlap(nam.FuzzyOverlap):
"""Calculates various overlap measures between two maps, using a fuzzy
definition.
.. deprecated:: 0.10.0
Use :py:class:`nipype.algorithms.metrics.FuzzyOverlap` instead.
"""
def __init__(self, **inputs):
super(nam.FuzzyOverlap, self).__init__(**inputs)
warnings.warn(("This interface has been deprecated since 0.10.0,"
" please use nipype.algorithms.metrics.FuzzyOverlap"),
DeprecationWarning)
| bsd-3-clause |
direvius/bfg | bfg/aggregator.py | 1 | 6997 | '''
Data aggregation facilities.
'''
import threading as th
import queue
import multiprocessing as mp
from .module_exceptions import ConfigurationError
from .util import FactoryBase
from .guns.base import Sample
from .util import q_to_dict
import asyncio
import time
from dateutil import tz
import numpy as np
import pandas as pd
import arrow
import logging
logger = logging.getLogger(__name__)
class ResultsSink(object):
''' Just collects samples, does not aggregate '''
def __init__(self, event_loop):
self.event_loop = event_loop
self.results = {}
self.results_queue = mp.Queue()
self._stop = False
self.stopped = False
self.event_loop.create_task(self._reader())
async def stop(self):
'''
Signal the reading coroutine to stop and wait for it
'''
self._stop = True
while not self.stopped:
await asyncio.sleep(1)
async def _reader(self):
'''
Read from the results queue asynchronously and put samples into
results dict
'''
logger.info("Results reader started")
while not self._stop:
try:
sample = self.results_queue.get_nowait()
self.results.setdefault(sample.ts, []).append(sample)
except queue.Empty:
await asyncio.sleep(1)
logger.info("Results reader stopped")
self.stopped = True
class CachingAggregator(object):
'''
Caching aggregator that can also notify its listeners
and write raw samples to a file. Listeners should have
a publish(timestamp, aggregated_data) method
'''
def __init__(
self, event_loop,
cache_depth=5, listeners=[],
raw_filename='result.samples'):
self.raw_file = open(raw_filename, 'w')
self.first_write = True
self.cache_depth = cache_depth
self.event_loop = event_loop
self.results = {}
self.aggregated_results = {}
self.results_queue = mp.Queue()
self._stop = False
self.reader_stopped = False
self.aggregator_stopped = False
self.listeners = listeners
self.event_loop.create_task(self._reader())
self.event_loop.create_task(self._aggregator())
async def stop(self):
'''
Set cache_depth to 0 in order to aggregate all the results in the buffer.
The aggregator will exit automatically once it observes that the reader
has stopped and the buffer is empty (so nothing more should appear
in the buffer)
'''
self.cache_depth = 0 # empty the cache
self._stop = True
while not self.reader_stopped:
await asyncio.sleep(1)
while not self.aggregator_stopped:
await asyncio.sleep(1)
async def _reader(self):
'''
Read everything from the queue until it is empty, then sleep
for half a second
'''
logger.info("Results reader started")
while not self._stop:
try:
sample = self.results_queue.get_nowait()
self.results.setdefault(sample.ts, []).append(sample)
except queue.Empty:
await asyncio.sleep(0.5)
logger.info("Results reader stopped")
self.reader_stopped = True
async def _aggregator(self):
'''
Sleep until the next aggregation is due (aggregations are performed
once per second), grab the oldest data from the buffer while maintaining
its size, aggregate it and send the results to listeners by calling publish().
The aggregate() function also writes the raw samples to a file
'''
start_time = time.time()
while not (self.reader_stopped and len(self.results) == 0):
work_time = time.time() - start_time
logger.debug("Last aggregation took %02d µs", work_time * 1000000)
delay = 1 - work_time
if delay > 0:
await asyncio.sleep(delay)
start_time = time.time()
for _ in range(len(self.results) - self.cache_depth):
smallest_key = min(self.results.keys())
ts, aggr = self.aggregate(
smallest_key, self.results.pop(smallest_key))
if aggr:
self.publish(ts, aggr)
logger.info("Results aggregator stopped")
self.aggregator_stopped = True
def publish(self, ts, aggr):
'''
Send aggregated data to the listeners
'''
logger.debug("Publishing aggregated data for %s:\n%s", ts, aggr)
self.aggregated_results[ts] = aggr
for listener in self.listeners:
listener.publish(ts, aggr)
def _stat_for_df(self, df):
'''
Collect stat for a dataframe
'''
return {
"samples": len(df),
"delay": {
"avg": df.delay.mean(),
"quantiles": q_to_dict(df.delay.quantile(
[0, .25, .5, .75, .9, .99, 1])),
},
"rt": {
"avg": df.rt.mean(),
"quantiles": q_to_dict(df.rt.quantile(
[0, .25, .5, .75, .9, .99, 1])),
}
}
def aggregate(self, ts, samples):
'''
Convert samples to dataframe, save raw samples to a file,
compute some statistics and return aggregated data
'''
if ts in self.aggregated_results:
logger.warning(
"%s already aggregated. Some data points lost."
"Try increasing aggregator cachesize")
return ts, None
df = pd.DataFrame(samples, columns=Sample._fields)
df.to_csv(self.raw_file, sep='\t', index=False, header=self.first_write)
self.first_write = False # write headers only in the beginning
aggr = {
"rps": len(df),
"overall": self._stat_for_df(df),
}
return ts, aggr
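# Minimal usage sketch (illustrative only; in bfg the AggregatorFactory below
# performs this wiring): create the aggregator on an asyncio loop, attach
# listeners, have workers put Sample objects into results_queue, and await
# stop() on shutdown, e.g.:
#
# loop = asyncio.get_event_loop()
# aggregator = CachingAggregator(loop, listeners=[LoggingListener()])
# # ... worker processes put Sample instances into aggregator.results_queue ...
# loop.run_until_complete(aggregator.stop())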
class LoggingListener(object):
def publish(self, ts, data):
rt_stats = data.get('overall').get('rt')
logger.info(
"{ts} {rps} RPS, mean RT: {rt_avg:.3f} ms, 99% < {rt_q99:.3f} ms".format(
ts=arrow.get(ts).to(tz.gettz()).format('HH:mm:ss'),
rps=data.get('rps'),
rt_avg=rt_stats.get('avg') / 1000,
rt_q99=rt_stats.get('quantiles').get('99') / 1000
)
)
class AggregatorFactory(FactoryBase):
''' Factory that produces aggregators '''
FACTORY_NAME = "aggregator"
def __init__(self, component_factory):
super().__init__(component_factory)
self.results = CachingAggregator(
self.event_loop,
listeners=[LoggingListener()])
def get(self, key):
if key in self.factory_config:
return self.results
else:
raise ConfigurationError(
"Configuration for %s schedule not found" % key)
| mit |
GDSA-SED/Projecte-SED-2013 | complementary_evaluation/single_evaluation.py | 1 | 7329 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb as SQL
import numpy as np
import matplotlib.pyplot as pl
nom = raw_input("Enter the name of the file containing the results of the classified images (type exit to quit):")
if nom != "exit":
fitxer = open(nom, 'r') #Open the classifier's .txt data file in read mode
cdata = fitxer.readline() #Read the contents of the first line of the file
#Declaration of the evaluation variables to be computed
pre = [0,0,0,0,0,0,0,0,0] #precision per class
pre_tot = 0 #overall precision
rec = [0,0,0,0,0,0,0,0,0] #recall per class
rec_tot = 0 #overall recall
F_score = [0,0,0,0,0,0,0,0,0] #F-score per class
F_score_tot = 0 #overall F-score
#Confusion matrices per class (position 0: true positives, 1: false positives, 2: true negatives, 3: false negatives)
dict_MC = {"sports":[0,0,0,0], "concert":[0,0,0,0], "exhibition":[0,0,0,0], "protest":[0,0,0,0], "fashion":[0,0,0,0], "conference":[0,0,0,0], "theater_dance":[0,0,0,0], "other":[0,0,0,0], "non_event":[0,0,0,0]}
#Connection to the database
db = SQL.connect(host="localhost", user="root", passwd="root",db="gdsa")
while cdata != "": #Lectura línia a línia el fitxer .txt fins al final
ID = cdata[0 : cdata.find(" ")] #Substring que correspon a la ID de la imatge classificada
classe = cdata[cdata.find(" ") + 1 : - 1] #Substring que correspon a la clase de la imatge classificada
cursor = db.cursor()
#Query the database for the ground truth of the image through its ID
cursor.execute("SELECT event_type FROM sed2013_task2_dataset_train_gs WHERE document_id =" + "'" + ID + "'")
classe_db = cursor.fetchone()[0] #Retrieve the class of the image from the database
#Count the true positives, false positives, true negatives and false negatives for each class's confusion matrix
if classe == classe_db:
dict_MC[classe][0] += 1
for i in range(len(dict_MC)):
if dict_MC.keys()[i] != classe:
d = dict_MC.keys()[i]
dict_MC[d][2] += 1
else:
for i in range(len(dict_MC)):
if dict_MC.keys()[i] != classe_db and dict_MC.keys()[i] != classe:
d = dict_MC.keys()[i]
dict_MC[d][2] += 1
dict_MC[classe_db][3] += 1
dict_MC[classe][1] += 1
cdata = fitxer.readline() #Read the next line
fitxer.close()
#Compute the precision, recall and F-Score parameters from the confusion matrices
num_div_p = 0 #Divisor used to compute the average precision
num_div_r = 0 #Divisor used to compute the average recall
num_div_f = 0 #Divisor used to compute the average F-score
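#Formulas applied in the loop below: precision = TP / (TP + FP),
#recall = TP / (TP + FN), F-score = 2 * precision * recall / (precision + recall);
#classes without any predictions or ground-truth instances are marked "none"
#and excluded from the averages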
for i in range(len(dict_MC)):
d = dict_MC.keys()[i]
if dict_MC[d][0] + dict_MC[d][1] != 0:
pre[i] = round(float(dict_MC[d][0]) / (dict_MC[d][0] + dict_MC[d][1]),5) #Per-class precision (5 decimal places)
elif dict_MC[d][1] != 0:
pre[i] = 0
else:
pre[i] = "none"
if dict_MC[d][0] + dict_MC[d][3] != 0:
rec[i] = round(float(dict_MC[d][0]) / (dict_MC[d][0] + dict_MC[d][3]),5) #Per-class recall (5 decimal places)
elif dict_MC[d][3]!= 0:
rec[i] = 0
else:
rec[i] = "none"
if pre[i] != "none" and rec[i] != "none":
if pre[i] + rec[i] != 0:
F_score[i] = round(2 * pre[i] * rec[i] / (pre[i] + rec[i]),5) #Per-class F-Score (5 decimal places)
else:
F_score[i] = 0
else:
F_score[i] = "none"
for i in range(len(dict_MC)):
if pre[i] != "none":
pre_tot = pre[i] + pre_tot #Accumulate the overall precision
num_div_p += 1
if rec[i] != "none":
rec_tot = rec[i] + rec_tot #Accumulate the overall recall
num_div_r += 1
if F_score[i] != "none":
F_score_tot = F_score[i] + F_score_tot #Accumulate the overall F-Score
num_div_f += 1
pre_tot = round(pre_tot/num_div_p,5) #Normalized overall precision (5 decimal places)
rec_tot = round(rec_tot/num_div_r,5) #Normalized overall recall (5 decimal places)
F_score_tot = round(F_score_tot/num_div_f,5) #Normalized overall F-Score (5 decimal places)
#Create a table with the results obtained in the evaluation
etiquetas_fil = ('sports', 'concert', 'exhibition', 'protest', 'fashion', 'conference', 'theater_dance', 'other', 'non_event', 'AVERAGE')
etiquetas_col = ('Precision', 'Recall', 'F-Score')
val_table = [[pre[8],rec[8],F_score[8]], [pre[4],rec[4],F_score[4]], [pre[7],rec[7],F_score[7]], [pre[2],rec[2],F_score[2]], [pre[3],rec[3],F_score[3]], [pre[0],rec[0],F_score[0]], [pre[5],rec[5],F_score[5]], [pre[1],rec[1],F_score[1]], [pre[6],rec[6],F_score[6]], [pre_tot, rec_tot, F_score_tot]]
fig = pl.figure(figsize = (12,2))
ax = fig.add_subplot(111)
ax.axis('off')
table = ax.table(cellText = val_table, cellLoc = 'center', rowLabels = etiquetas_fil, rowLoc = 'center', colLabels = etiquetas_col,colLoc = 'center', loc = 'center')
#Create a chart with the results obtained in the evaluation
n = np.array(range(10))
val_p = [0,0,0,0,0,0,0,0,0,0]
val_r = [0,0,0,0,0,0,0,0,0,0]
val_f = [0,0,0,0,0,0,0,0,0,0]
for i in range(10):
for j in range(3):
if val_table[i][j] == "none":
val_table[i][j] = 0
val_p[i] = val_table[i][0]
val_r[i] = val_table[i][1]
val_f[i] = val_table[i][2]
ind = np.arange(10)
width = 0.25
fig = pl.figure(figsize = (9,5))
ax = fig.add_subplot(111)
bar_p = ax.bar(ind, val_p, width, color='r')
bar_r = ax.bar(ind+width, val_r, width, color='b')
bar_f = ax.bar(ind+2*width, val_f, width, color='g')
ax.set_title('Avaluation Scores')
ax.set_xticks(ind+1.5*width)
ax.set_xticklabels( ('sports', 'concert', 'exhibition', 'protest', 'fashion', 'conference', 'theater_dance', 'other', 'non_event', 'AVERAGE'), rotation='vertical')
ax.legend((bar_p[0], bar_r[0],bar_f[0]), ('Precision', 'Recall', 'F-Score'), loc='center left', bbox_to_anchor=(1, 0.5))
ax.autoscale(tight=True)
pl.subplots_adjust(right = 0.8,bottom = 0.3)
pl.show()
| gpl-3.0 |
quheng/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
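# _type_conv maps the numpy dtype character to the ARPACK routine prefix
# (single/double precision, real/complex), e.g. 'd' selects dsaupd/dnaupd;
# _ndigits gives the decimal precision used when rounding eigenvalues for
# sorting in the nonsymmetric extraction step.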
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
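# Illustrative sketch only (added; not in the original source). IterOpInv
# wraps an iterative solve of [A - sigma*M]*x = b, the operator ARPACK needs
# in shift-invert mode when A or M is only available as a LinearOperator.
# The operators, sigma and tolerance below are assumptions for the example.
def _demo_iter_op_inv():
    A = aslinearoperator(np.diag([1.0, 2.0, 3.0]))
    M = aslinearoperator(np.eye(3))
    op = IterOpInv(A, M, sigma=0.5, tol=1e-8)
    b = np.ones(3)
    # approximately solves (A - 0.5*M) x = b with GMRES
    return op.matvec(b)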
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
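# Hypothetical illustration (added for exposition; not in the original code).
# get_OPinv_matvec returns a callable computing [A - sigma*M]^-1 * b, picking
# a dense LU, a sparse LU or an iterative solver depending on the input types.
# The dense matrix and shift below are made up.
def _demo_get_opinv_matvec():
    A = np.diag([1.0, 2.0, 3.0])
    solve = get_OPinv_matvec(A, M=None, sigma=0.5, symmetric=True)
    b = np.ones(3)
    # equivalent to solving (A - 0.5*I) x = b through a dense LU factorization
    return solve(b)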
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
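# Hedged example (added for illustration; not part of the original backport).
# With sigma given, _eigs works in shift-invert mode and returns the
# eigenvalues closest to sigma. On a diagonal matrix the spectrum is known,
# so the expected answer is easy to state.
def _demo_eigs_shift_invert():
    A = np.diag(np.arange(1.0, 7.0))
    vals, vecs = _eigs(A, k=2, sigma=3.1, which='LM')
    # the eigenvalues nearest sigma=3.1 are 3 and 4
    return np.sort(vals.real)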
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
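# Illustrative sketch (added; not part of the original module). For a real
# symmetric A, _eigsh in shift-invert 'normal' mode returns the eigenvalues
# nearest sigma; the diagonal test matrix below makes the answer obvious.
def _demo_eigsh_shift_invert():
    A = np.diag(np.arange(1.0, 7.0))
    vals, vecs = _eigsh(A, k=2, sigma=3.1, which='LM', mode='normal')
    # expected to be approximately [3., 4.]
    return np.sort(vals)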
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
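# Hedged usage sketch (added for illustration; not in the original module).
# The k largest singular values from the naive _svds implementation should
# agree with numpy's dense SVD on a small random matrix.
def _demo_svds():
    rng = np.random.RandomState(0)
    A = rng.rand(10, 4)
    u, s, vh = _svds(A, k=3)
    dense_s = np.linalg.svd(A, compute_uv=False)
    # both arrays should match to within the eigensolver tolerance
    return np.sort(s), np.sort(dense_s)[-3:]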
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
liesbethvanherpe/NeuroM | examples/plot_somas.py | 5 | 2927 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Load and view multiple somas'''
import os
from neurom import load_neuron
import neurom.view.common as common
import matplotlib.pyplot as plt
import numpy as np
_path = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_path, '../test_data')
SWC_PATH = os.path.join(DATA_PATH, 'swc')
def random_color():
'''Random color generation'''
return np.random.rand(3, 1)
def plot_somas(somas):
'''Plot set of somas on same figure as spheres, each with different color'''
_, ax = common.get_figure(new_fig=True, subplot=111,
params={'projection': '3d', 'aspect': 'equal'})
for s in somas:
common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1)
plt.show()
if __name__ == '__main__':
# define set of files containing relevant neurons
file_nms = [os.path.join(SWC_PATH, file_nm) for file_nm in ['Soma_origin.swc',
'Soma_translated_1.swc',
'Soma_translated_2.swc']]
# load from file and plot
sms = [load_neuron(file_nm).soma for file_nm in file_nms]
plot_somas(sms)
| bsd-3-clause |
wimmuskee/readability-score | readability_score/textanalyzer.py | 1 | 6194 | # -*- coding: utf-8 -*-
"""
This class contains the main text analyzer used in all
the calculators.
Wim Muskee, 2012-2017
wimmuskee@gmail.com
License: GPL-2
"""
from __future__ import division
from sys import version_info
import warnings
import re
import os
with warnings.catch_warnings():
# catch NLTK warning, fixed in 4.2.2
warnings.filterwarnings("ignore",category=PendingDeprecationWarning,message='the imp module is deprecated in favour of importlib.*')
# catch ndg-httpsclient warning, fixed in 0.4.2
warnings.filterwarnings("ignore",category=ImportWarning,message='Not importing directory.*ndg.*')
# catch matplotlib warning, don't know what the issue is, no problem for this package
warnings.filterwarnings("ignore",category=ImportWarning,message='Not importing directory.*mpl_toolkits.*')
from nltk.tokenize import sent_tokenize, word_tokenize
import pyphen
class TextAnalyzer:
def __init__(self,text,locale='en_GB'):
self.setText(text)
self.setLocale(locale)
self.sentences = []
self.simple_words = []
self.min_age = 0
self.scores = {
'sent_count': 0, # nr of sentences
'word_count': 0, # nr of words
'letter_count':0, # nr of characters in words (no spaces)
'syll_count': 0, # nr of syllables
'polysyllword_count': 0, # nr of polysyllables (words with more than 2 syllables)
'simpleword_count': 0, # nr of simplewords (depends on provided list)
'sentlen_average': 0, # words per sentence
'wordlen_average': 0, # syllables per word
'wordletter_average': 0, # letters per word
'wordsent_average': 0 # sentences per word
}
self.re_words = re.compile(r'\w+', flags = re.UNICODE)
def setText(self,text):
"""
Sets the text, and makes sure Python2 is working with unicode.
"""
if version_info.major == 2 and not isinstance(text,unicode):
self.text = unicode(text,'utf-8')
else:
self.text = text
def setLocale(self,locale):
"""
Sets locale-related data.
"""
if os.path.exists(locale):
self.hyphenator = pyphen.Pyphen(filename=locale)
elif len(locale) > 1 and locale in pyphen.LANGUAGES:
self.hyphenator = pyphen.Pyphen(lang=locale)
self.setTokenizeLanguage(locale)
else:
raise LookupError("provided locale not supported by pyphen")
def setSimpleWordsList(self,simplewords):
"""
Simple word list for DaleChall calculator.
"""
if isinstance(simplewords,list):
self.simple_words = simplewords
else:
raise ValueError("A simple word list should be provided as list")
def setTokenizeLanguage(self,locale):
"""
Set the language NLTK's sent_tokenize uses.
Based on local available punkt tokenizers.
This is done in the init, but can also be changed by calling this.
"""
self.tokenize_language = self.__getTokenizelanguage(locale[:2])
def setTextScores(self):
"""
Wrapper for setting all the scores.
"""
self.setSentences()
self.parseSentences()
self.setAverages()
def setSentences(self):
"""
Tokenize the sentences from the text. Depending on the locale,
a custom tokenize language may be used if available.
"""
try:
self.sentences = sent_tokenize(self.text, language=self.tokenize_language)
except LookupError:
# maybe custom tokenize language not available on fs, do default
self.sentences = sent_tokenize(self.text, language="english")
self.scores['sent_count'] = len(self.sentences)
def parseSentences(self):
"""
Parse each sentence and each word, and count
the individual countable scores.
"""
for s in self.sentences:
words = self.re_words.findall(s)
self.scores['word_count'] += len(words)
for w in words:
syllables_count = self.hyphenator.inserted(w).count('-') + 1
self.scores['syll_count'] += syllables_count
self.scores['letter_count'] += len(w)
if syllables_count > 2:
self.scores['polysyllword_count'] += 1
if self.simple_words:
if w.lower() in self.simple_words:
self.scores['simpleword_count'] += 1
def setAverages(self):
"""
Sets all relevant averages based on the
individual counts.
"""
if self.scores['sent_count'] > 0:
self.scores['sentlen_average'] = self.scores['word_count'] / self.scores['sent_count']
if self.scores['word_count'] > 0:
self.scores['wordlen_average'] = self.scores['syll_count'] / self.scores['word_count']
self.scores['wordletter_average'] = self.scores['letter_count'] / self.scores['word_count']
self.scores['wordsent_average'] = self.scores['sent_count'] / self.scores['word_count']
def __getTokenizelanguage(self,locale_lookup):
"""
Try to find a value for provided locale key.
Return "english" by default.
"""
lookup_value = "english"
lookup = {
"cs": "czech",
"da": "danish",
"de": "german",
"el": "greek",
"es": "spanish",
"et": "estonian",
"en": "english",
"fr": "french",
"it": "italian",
"nb": "norwegian",
"nl": "dutch",
"po": "polish",
"pt": "portuguese",
"sl": "slovene",
"sv": "swedish"
}
if locale_lookup in lookup:
lookup_value = lookup[locale_lookup]
return lookup_value
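# Hedged usage sketch (added for illustration; not part of the original
# module). A calculator would normally drive the class like this; the sample
# sentence is made up and the en_GB hyphenation dictionary is assumed to be
# available through pyphen.
def _demo_textanalyzer():
    analyzer = TextAnalyzer("This is a short example sentence.", "en_GB")
    analyzer.setTextScores()
    # e.g. analyzer.scores['word_count'] and analyzer.scores['sentlen_average']
    return analyzer.scores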
| gpl-2.0 |
hsogo/psychopy_tobii_controller | samples/utility_sample01.py | 1 | 2464 | import matplotlib.pyplot as plt
import numpy as np
# import utility
import psychopy_tobii_controller.utility as util
import psychopy_tobii_controller.constants as const
# Load data file using load_data().
# Return values are two list objects.
# The first one is gaze data, and the other is event data.
gaze_data, event_data = util.load_data('data.tsv')
# Use moving_average() to smooth gaze data if necessary.
# Note that the first argument of moving_average() is not
# gaze data list returned by load_data, but the ELEMENTS of
# the gaze data.
#
# If you want to apply moving average to N-th recording session
# of the gaze data, call moving_average()
#
# util.moving_average(gaze_data[N], n=3)
#
smoothed_data = [util.moving_average(g, n=3) for g in gaze_data]
for trial in range(len(gaze_data)):
# Getting list of fixations using detect_fixation_dt().
# numpy.savetxt() would be convenient to output fixation data
# to a text file.
#
# numpy.savetxt('fix.txt',fixations, fmt='%.1f', delimiter=',')
#
fixations = util.detect_fixation_dt(smoothed_data[trial],
max_dispersion = 50,
min_duration = 100,
eye = 'LR')
# Prepare gaze data to plot.
t = smoothed_data[trial][:,const.TimeStamp]
x = smoothed_data[trial][:,const.GazePointX]
y = smoothed_data[trial][:,const.GazePointY]
# X-Y plot (smoothed data)
plt.subplot(2,2,1)
plt.plot(x, y)
plt.xlim([-800,800])
plt.ylim([-600,600])
plt.xlabel('Horizontal gaze position (pix)')
plt.ylabel('Vertical gaze position (pix)')
# X-Y plot (detected fixations)
plt.subplot(2,2,2)
plt.plot(fixations[:,const.FixX], fixations[:,const.FixY], 'o-')
plt.xlim([-800,800])
plt.ylim([-600,600])
plt.xlabel('Horizontal gaze position (pix)')
plt.ylabel('Vertical gaze position (pix)')
# X-T plot with event data
plt.subplot(2,2,3)
plt.plot(t, x, label='X')
plt.plot(t, y, label='Y')
for event in event_data[trial]:
plt.plot([event[const.EventTime], event[const.EventTime]],
[-800, 800], 'k:')
plt.text(event[const.EventTime], 400, event[const.EventText],
rotation=90)
plt.ylim([-800,800])
plt.xlabel('Time (ms)')
plt.ylabel('Gaze position (pix)')
plt.legend()
plt.show()
| gpl-3.0 |
pratapvardhan/pandas | pandas/tests/scalar/timedelta/test_construction.py | 6 | 8805 | # -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import Timedelta
def test_construction():
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert Timedelta(days=10,
microseconds=10 * 1000 * 1000).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH#9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH#8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
with pytest.raises(ValueError):
Timedelta('-10 days -1 h 1.5m 1s 3us')
# only leading neg signs are allowed
with pytest.raises(ValueError):
Timedelta('10 days -1 h 1.5m 1s 3us')
# no units specified
with pytest.raises(ValueError):
Timedelta('3.1415')
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert pd.to_timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2)
assert Timedelta(pd.offsets.Second(2)) == Timedelta(seconds=2)
# GH#11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (pd.to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
with pytest.raises(ValueError):
Timedelta(u'foo bar')
@pytest.mark.parametrize('item', list({'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}.items()))
@pytest.mark.parametrize('npdtype', [np.int64, np.int32, np.int16,
np.float64, np.float32, np.float16])
def test_td_construction_with_np_dtypes(npdtype, item):
# GH#8757: test construction with np dtypes
pykwarg, npkwarg = item
expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
@pytest.mark.parametrize('val', [
'1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999'])
def test_td_from_repr_roundtrip(val):
# round-trip both for string and value
td = Timedelta(val)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
def test_overflow_on_construction():
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
with pytest.raises(OverflowError):
pd.Timedelta(value)
# xref GH#17637
with pytest.raises(OverflowError):
pd.Timedelta(7 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timedelta(timedelta(days=13 * 19999))
@pytest.mark.parametrize('fmt,exp', [
('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10,
nanoseconds=12)),
('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, seconds=3,
milliseconds=10, microseconds=10,
nanoseconds=12)),
('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)),
('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)),
('P0DT0H0M0.00001S', Timedelta(microseconds=10)),
('P0DT0H0M0.001S', Timedelta(milliseconds=1)),
('P0DT0H1M0S', Timedelta(minutes=1)),
('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61))
])
def test_iso_constructor(fmt, exp):
assert Timedelta(fmt) == exp
@pytest.mark.parametrize('fmt', [
'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S',
'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S',
'P1DT0H0M0.S'])
def test_iso_constructor_raises(fmt):
with tm.assert_raises_regex(ValueError, 'Invalid ISO 8601 Duration '
'format - {}'.format(fmt)):
Timedelta(fmt)
@pytest.mark.parametrize('constructed_td, conversion', [
(Timedelta(nanoseconds=100), '100ns'),
(Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1, milliseconds=1,
microseconds=1, nanoseconds=1), 694861001001001),
(Timedelta(microseconds=1) + Timedelta(nanoseconds=1), '1us1ns'),
(Timedelta(microseconds=1) - Timedelta(nanoseconds=1), '999ns'),
(Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), '990ns')])
def test_td_constructor_on_nanoseconds(constructed_td, conversion):
# GH#9273
assert constructed_td == Timedelta(conversion)
def test_td_constructor_value_error():
with pytest.raises(TypeError):
Timedelta(nanoseconds='abc')
| bsd-3-clause |
rs2/pandas | pandas/tests/arrays/masked/test_arrow_compat.py | 2 | 1505 | import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
return request.param
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = pa.array(
data.to_numpy(object, na_value=None),
type=pa.from_numpy_dtype(data.dtype.numpy_dtype),
)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
import pyarrow as pa
dtype = pd.UInt32Dtype()
result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64"))
expected = pd.array([1, 2, 3, 4, None], dtype="UInt32")
tm.assert_extension_array_equal(result, expected)
| bsd-3-clause |
q1ang/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
michaelbrundage/vowpal_wabbit | python/vowpalwabbit/sklearn_vw.py | 7 | 20683 | # -*- coding: utf-8 -*-
# pylint: disable=unused-argument, invalid-name, too-many-arguments, too-many-locals
"""
Utilities to support integration of Vowpal Wabbit and scikit-learn
"""
import numpy as np
import re
import io
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.linear_model.base import LinearClassifierMixin, SparseCoefMixin
from sklearn.datasets.svmlight_format import dump_svmlight_file
from sklearn.utils.validation import check_is_fitted
from sklearn.externals import joblib
from vowpalwabbit import pyvw
DEFAULT_NS = ''
CONSTANT_HASH = 116060
INVALID_CHARS = re.compile(r"[\|: \n]+")
class VW(BaseEstimator):
"""Vowpal Wabbit Scikit-learn Base Estimator wrapper
Attributes
----------
params : {dict}
dictionary of model parameter keys and values
fit_ : {bool}
this variable is only created after the model is fitted
"""
params = dict()
def __init__(self,
probabilities=None,
random_seed=None,
ring_size=None,
convert_to_vw=None,
bfgs=None,
mem=None,
ftrl=None,
ftrl_alpha=None,
ftrl_beta=None,
learning_rate=None,
l=None,
power_t=None,
decay_learning_rate=None,
initial_t=None,
feature_mask=None,
initial_regressor=None,
i=None,
initial_weight=None,
random_weights=None,
input_feature_regularizer=None,
audit=None,
a=None,
progress=None,
P=None,
quiet=None,
no_stdin=None,
hash=None,
ignore=None,
keep=None,
redefine=None,
bit_precision=None,
b=None,
noconstant=None,
constant=None,
C=None,
ngram=None,
skips=None,
feature_limit=None,
affix=None,
spelling=None,
dictionary=None,
dictionary_path=None,
interactions=None,
permutations=None,
leave_duplicate_interactions=None,
quadratic=None,
q=None,
cubic=None,
testonly=None,
t=None,
min_prediction=None,
max_prediction=None,
sort_features=None,
loss_function=None,
link=None,
quantile_tau=None,
l1=None,
l2=None,
named_labels=None,
final_regressor=None,
f=None,
readable_model=None,
invert_hash=None,
passes=None,
save_resume=None,
output_feature_regularizer_binary=None,
output_feature_regularizer_text=None,
oaa=None,
ect=None,
csoaa=None,
wap=None):
"""VW model constructor, exposing all supported parameters to keep sklearn happy
Parameters
----------
probabilities
random_seed (int): seed random number generator
ring_size (int): size of example ring
convert_to_vw (bool): flag to convert X input to vw format
Update options
bfgs: use L-BFGS optimization algorithm
mem: set the rank of the inverse hessian approximation used by bfgs
ftrl: use FTRL-Proximal optimization algorithm
ftrl_alpha: ftrl alpha parameter
ftrl_beta: ftrl beta parameter
learning_rate,l (float): Set learning rate
power_t (float): t power value
decay_learning_rate (float): Set Decay factor for learning_rate between passes
initial_t (float): initial t value
feature_mask (str): Use existing regressor to determine which parameters may be updated.
If no initial_regressor given, also used for initial weights.
Weight options
initial_regressor,i (str): Initial regressor(s)
initial_weight (float): Set all weights to an initial value of arg.
random_weights (bool): make initial weights random
input_feature_regularizer (str): Per feature regularization input file
Diagnostic options
audit,a (bool): print weights of features
progress,P (str): Progress update frequency. int: additive, float: multiplicative
quiet (bool): Don't output disgnostics and progress updates
Feature options
hash (str): how to hash the features. Available options: strings, all
ignore (str): ignore namespaces beginning with character <arg>
keep (str): keep namespaces beginning with character <arg>
redefine (str): Redefine namespaces beginning with characters of string S as namespace N. <arg> shall be in
form 'N:=S' where := is operator. Empty N or S are treated as default namespace.
Use ':' as a wildcard in S.
bit_precision,b (int): number of bits in the feature table
noconstant (bool): Don't add a constant feature
constant,C (float): Set initial value of constant
ngram (str): Generate N grams. To generate N grams for a single namespace 'foo', arg should be fN.
skips (str): Generate skips in N grams. This in conjunction with the ngram tag can be used to generate
generalized n-skip-k-gram. To generate n-skips for a single namespace 'foo', arg should be fN.
feature_limit (str): limit to N features. To apply to a single namespace 'foo', arg should be fN
affix (str): generate prefixes/suffixes of features; argument '+2a,-3b,+1' means generate 2-char prefixes for
namespace a, 3-char suffixes for b and 1 char prefixes for default namespace
spelling (str): compute spelling features for a give namespace (use '_' for default namespace)
dictionary (str): read a dictionary for additional features (arg either 'x:file' or just 'file')
dictionary_path (str): look in this directory for dictionaries; defaults to current directory or env{PATH}
interactions (str): Create feature interactions of any level between namespaces.
permutations (bool): Use permutations instead of combinations for feature interactions of same namespace.
leave_duplicate_interactions (bool): Don't remove interactions with duplicate combinations of namespaces. For
ex. this is a duplicate: '-q ab -q ba' and a lot more in '-q ::'.
quadratic,q (str): Create and use quadratic features, q:: corresponds to a wildcard for all printable characters
cubic (str): Create and use cubic features
Example options
testonly,t (bool): Ignore label information and just test
min_prediction (float): Smallest prediction to output
max_prediction (float): Largest prediction to output
sort_features (bool): turn this on to disregard order in which features have been defined. This will lead to
smaller cache sizes
loss_function (str): default_value("squared"), "Specify the loss function to be used, uses squared by default.
Currently available ones are squared, classic, hinge, logistic and quantile.
link (str): apply a link function to convert output: e.g. 'logistic'
quantile_tau (float): default_value(0.5), "Parameter \\tau associated with Quantile loss. Defaults to 0.5
l1 (float): l_1 lambda
l2 (float): l_2 lambda
named_labels (str): use names for labels (multiclass, etc.) rather than integers, argument specified all
possible labels, comma-sep, eg \"--named_labels Noun,Verb,Adj,Punc\"
Output model
final_regressor,f (str): Final regressor
readable_model (str): Output human-readable final regressor with numeric features
invert_hash (str): Output human-readable final regressor with feature names. Computationally expensive.
passes (int): Number of training passes
save_resume (bool): save extra state so learning can be resumed later with new data
output_feature_regularizer_binary (str): Per feature regularization output file
output_feature_regularizer_text (str): Per feature regularization output file, in text
Multiclass options
oaa (int): Use one-against-all multiclass learning with labels
ect (int): Use error correcting tournament multiclass learning
csoaa (int): Use cost sensitive one-against-all multiclass learning
wap (int): Use weighted all pairs multiclass learning
Contextual Bandit Optimization
cb (int): Use contextual bandit learning with specified costs
cbify (int): Convert multiclass on <k> classes into a contextual bandit problem
Returns
-------
(BaseEstimator): Returns self
"""
# clear estimator attributes
if hasattr(self, 'fit_'):
del self.fit_
if hasattr(self, 'passes_'):
del self.passes_
if hasattr(self, 'convert_to_vw_'):
del self.convert_to_vw_
if hasattr(self, 'vw_'):
del self.vw_
# reset params and quiet models by default
self.params = {'quiet': True}
# assign all valid args to params dict
args = dict(locals())
for k, v in args.items():
if k != 'self' and k != '__class__' and v is not None:
self.params[k] = v
# store passes separately to be used in fit
self.passes_ = self.params.pop('passes', 1)
# pull out convert_to_vw from params
self.convert_to_vw_ = self.params.pop('convert_to_vw', True)
self.vw_ = None
super(VW, self).__init__()
def get_vw(self):
"""Factory to create a vw instance on demand
Returns
-------
pyvw.vw instance
"""
if self.vw_ is None:
self.vw_ = pyvw.vw(**self.params)
return self.vw_
def fit(self, X, y=None, sample_weight=None):
"""Fit the model according to the given training data
TODO: for first pass create and store example objects.
for N-1 passes use example objects directly (simulate cache file...but in memory for faster processing)
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features or 1 if not convert_to_vw) or
Training vector, where n_samples is the number of samples and
n_features is the number of features.
if not using convert_to_vw, X is expected to be a list of vw formatted feature vector strings with labels
y : array-like, shape (n_samples,), optional if not convert_to_vw
Target vector relative to X.
sample_weight : array-like, shape (n_samples,)
sample weight vector relative to X.
Returns
-------
return self so pipeline can call transform() after fit
"""
if self.convert_to_vw_:
X = tovw(x=X, y=y, sample_weight=sample_weight)
model = self.get_vw()
# add examples to model
for n in range(self.passes_):
if n > 1:
np.random.shuffle(X)
for idx, x in enumerate(X):
model.learn(x)
self.fit_ = True
return self
def transform(self, X, y=None):
"""Transform does nothing by default besides closing the model. Transform is required for any estimator
in a sklearn pipeline that isn't the final estimator
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features or 1 if not convert_to_vw) or
Training vector, where n_samples is the number of samples and
n_features is the number of features.
if not using convert_to_vw, X is expected to be a list of vw formatted feature vector strings with labels
y : array-like, shape (n_samples,), optional if not convert_to_vw
Target vector relative to X.
Returns
-------
return X to be passed into next estimator in pipeline
"""
if not self.get_vw().finished:
self.get_vw().finish()
return X
def predict(self, X):
"""Predict with Vowpal Wabbit model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features or 1)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
if not using convert_to_vw, X is expected to be a list of vw formatted feature vector strings with labels
Returns
-------
y : array-like, shape (n_samples,)
Output vector relative to X.
"""
check_is_fitted(self, 'fit_')
try:
num_samples = X.shape[0] if X.ndim > 1 else len(X)
except AttributeError:
num_samples = len(X)
if self.convert_to_vw_:
X = tovw(X)
model = self.get_vw()
label_type = model.get_label_type()
y = np.empty([num_samples])
# add test examples to model
for idx, x in enumerate(X):
y[idx] = model.predict(ec=x, labelType=label_type)
return y
def __str__(self):
if self.params is not None:
return str(self.params)
def __repr__(self):
return self.__str__()
def __del__(self):
if self.vw_ is not None:
self.vw_.__del__()
def get_params(self, deep=True):
"""This returns the set of vw and estimator parameters currently in use"""
out = dict()
# add in the vw params
out.update(self.params)
# add in the estimator params
out['passes'] = self.passes_
out['convert_to_vw'] = self.convert_to_vw_
return out
def set_params(self, **params):
"""This destroys and recreates the Vowpal Wabbit model with updated parameters
any parameters not provided will remain as they were initialized to at construction
Parameters
----------
params : {dict}
dictionary of model parameter keys and values to update
"""
self.params.update(params)
# manage passes and convert_to_vw params different because they are estimator params, not vw params
if 'passes' not in params:
self.params['passes'] = self.passes_
if 'convert_to_vw' not in params:
self.params['convert_to_vw'] = self.convert_to_vw_
self.__init__(**self.params)
return self
def get_coefs(self):
"""Returns coefficient weights as ordered sparse matrix
Returns
-------
{sparse matrix} coefficient weights for model
"""
model = self.get_vw()
return csr_matrix([model.get_weight(i) for i in range(model.num_weights())])
def set_coefs(self, coefs):
"""Sets coefficients weights from ordered sparse matrix
Parameters
----------
coefs : {sparse matrix} coefficient weights for model
"""
model = self.get_vw()
for i in range(coefs.getnnz()):
model.set_weight(int(coefs.indices[i]), 0, float(coefs.data[i]))
def get_intercept(self):
""" Returns intercept weight for model
Returns
-------
{int} intercept value, 0 if noconstant
"""
return self.get_vw().get_weight(CONSTANT_HASH)
def save(self, filename):
joblib.dump(dict(params=self.get_params(), coefs=self.get_coefs(), fit=self.fit_), filename=filename)
def load(self, filename):
obj = joblib.load(filename=filename)
self.set_params(**obj['params'])
self.set_coefs(obj['coefs'])
self.fit_ = obj['fit']
class ThresholdingLinearClassifierMixin(LinearClassifierMixin):
"""Mixin for linear classifiers. A threshold is used to specify the positive
class cutoff
Handles prediction for sparse and dense X.
"""
classes_ = np.array([-1., 1.])
def __init__(self, **params):
# assume 0 as positive score threshold
self.pos_threshold = params.pop('pos_threshold', 0.0)
super(ThresholdingLinearClassifierMixin, self).__init__(**params)
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores >= self.pos_threshold).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
class VWClassifier(SparseCoefMixin, ThresholdingLinearClassifierMixin, VW):
"""Vowpal Wabbit Classifier model
Only supports binary classification currently. Use VW directly for multiclass classification
note - don't try to apply link='logistic' on top of the existing functionality
"""
def __init__(self, **params):
# assume logistic loss functions
if 'loss_function' not in params:
params['loss_function'] = 'logistic'
super(VWClassifier, self).__init__(**params)
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
return VW.predict(self, X=X)
class VWRegressor(VW, RegressorMixin):
"""Vowpal Wabbit Regressor model """
pass
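# Hedged usage sketch (added for illustration; not part of the original
# wrapper). VWRegressor follows the usual scikit-learn fit/predict protocol;
# the toy data below is made up.
def _demo_vw_regressor():
    X = np.array([[0.1, 0.2], [0.2, 0.4], [0.3, 0.6], [0.4, 0.8]])
    y = np.array([0.3, 0.6, 0.9, 1.2])
    model = VWRegressor(l=0.5, passes=10, convert_to_vw=True)
    model.fit(X, y)
    return model.predict(X)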
def tovw(x, y=None, sample_weight=None):
"""Convert array or sparse matrix to Vowpal Wabbit format
Parameters
----------
x : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like}, shape (n_samples,), optional
Target vector relative to X.
sample_weight : {array-like}, shape (n_samples,), optional
sample weight vector relative to X.
Returns
-------
out : {array-like}, shape (n_samples, 1)
Training vectors in VW string format
"""
use_truth = y is not None
use_weight = sample_weight is not None
# convert to numpy array if needed
if not isinstance(x, (np.ndarray, csr_matrix)):
x = np.array(x)
if not isinstance(y, np.ndarray):
y = np.array(y)
# make sure this is a 2d array
if x.ndim == 1:
x = x.reshape(1, -1)
if y.ndim == 0:
y = y.reshape(1)
rows, cols = x.shape
# check for invalid characters if array has string values
if x.dtype.char == 'S':
for row in range(rows):
for col in range(cols):
x[row, col] = INVALID_CHARS.sub('.', x[row, col])
# convert input to svmlight format
s = io.BytesIO()
dump_svmlight_file(x, np.zeros(rows), s)
# parse entries to construct VW format
rows = s.getvalue().decode('ascii').split('\n')[:-1]
out = []
for idx, row in enumerate(rows):
truth = y[idx] if use_truth else 1
weight = sample_weight[idx] if use_weight else 1
features = row.split('0 ', 1)[1]
# only using a single namespace and no tags
out.append(('{y} {w} |{ns} {x}'.format(y=truth, w=weight, ns=DEFAULT_NS, x=features)))
s.close()
return out
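# Illustrative sketch (added; not part of the original module). tovw turns a
# feature matrix plus labels into VW input lines of the form
# "<label> <weight> |<namespace> <index>:<value> ...", so the call below
# yields roughly ['1 1 | 0:0.5 1:0.25', '-1 1 | 1:1.0'].
def _demo_tovw():
    X = np.array([[0.5, 0.25], [0.0, 1.0]])
    y = np.array([1, -1])
    return tovw(X, y)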
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
cogmission/nupic | examples/opf/clients/hotgym/anomaly/one_gym/run.py | 34 | 4938 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import nupic_anomaly_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
)
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
"""
Given a model params dictionary, create a CLA Model. Automatically enables
inference for kw_energy_consumption.
:param modelParams: Model params dict
:return: OPF Model object
"""
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
"""
Given a gym name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param gymName: Gym name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
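# For example (illustrative): gymName "rec-center-hourly" maps to the module name
# "model_params.rec_center_hourly_model_params" via the replacements above.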
def runIoThroughNupic(inputData, model, gymName, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
:param gymName: Gym name, used for output handler naming
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(gymName)
else:
output = nupic_anomaly_output.NuPICFileOutput(gymName)
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, consumption, prediction, anomalyScore)
inputFile.close()
output.close()
def runModel(gymName, plot=False):
"""
  Assumes the gymName corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
:param gymName: Important for finding model params and input CSV file
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
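# Example invocation (assuming rec-center-hourly.csv and the swarmed model params exist):
#   python run.py          # write results to a file via NuPICFileOutput
#   python run.py --plot   # plot results dynamically with matplotlib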
| agpl-3.0 |
btabibian/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tseries/tests/test_period.py | 9 | 153010 | """Test suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
from pandas import Timestamp
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
from pandas import Series, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
#
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period('nat', freq='W-SUN')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'W-SUN')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='3D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, offsets.Day(3))
self.assertEqual(p.freqstr, '3D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertRaises(ValueError, Period, 'NaT')
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
self.assertEqual(p1.ordinal, p2.ordinal)
self.assertEqual(p1.freq, offsets.MonthEnd(3))
self.assertEqual(p1.freqstr, '3M')
self.assertEqual(p2.freq, offsets.MonthEnd())
self.assertEqual(p2.freqstr, 'M')
result = p1 + 1
self.assertEqual(result.ordinal, (p2 + 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
result = p1 - 1
self.assertEqual(result.ordinal, (p2 - 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='0M')
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil(self):
from pandas.tslib import _dateutil_gettz as gettz
from pandas.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_nat_tz(self):
t = Period('NaT', freq='M').to_timestamp()
self.assertTrue(t is tslib.NaT)
t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo')
self.assertTrue(t is tslib.NaT)
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-01-31'))
p = pd.Period('2011-01', freq='3M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-03-31'))
def test_timestamp_nat_mult(self):
for freq in ['M', '3M']:
p = pd.Period('NaT', freq=freq)
self.assertTrue(p.to_timestamp(how='S') is pd.NaT)
self.assertTrue(p.to_timestamp(how='E') is pd.NaT)
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_period_constructor_offsets(self):
self.assertEqual(Period('1/1/2005', freq=offsets.MonthEnd()),
Period('1/1/2005', freq='M'))
self.assertEqual(Period('2005', freq=offsets.YearEnd()),
Period('2005', freq='A'))
self.assertEqual(Period('2005', freq=offsets.MonthEnd()),
Period('2005', freq='M'))
self.assertEqual(Period('3/10/12', freq=offsets.BusinessDay()),
Period('3/10/12', freq='B'))
self.assertEqual(Period('3/10/12', freq=offsets.Day()),
Period('3/10/12', freq='D'))
self.assertEqual(Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=1, freq='Q'))
self.assertEqual(Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=2, freq='Q'))
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day()),
Period(year=2005, month=3, day=1, freq='D'))
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay()),
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day(3)),
expected)
self.assertEqual(Period(year=2005, month=3, day=1, freq='3D'),
expected)
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay(3)),
Period(year=2012, month=3, day=10, freq='3B'))
self.assertEqual(Period(200701, freq=offsets.MonthEnd()),
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1.year, 18695)
self.assertEqual(i2.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertEqual(i1.freq, offsets.Minute())
self.assertEqual(i1.freqstr, 'T')
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assertIsInstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
self.assertRaises(ValueError, left.__sub__,
Period('2007-01', freq='M'))
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
# freq with mult should not affect to the result
self.assertEqual(start_ts, p.to_timestamp('3D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
self.assertEqual(end_ts, p.to_timestamp('3D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp('2T', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('5S', how='start')
self.assertEqual(result, expected)
p = Period('NaT', freq='W')
self.assertTrue(p.to_timestamp() is tslib.NaT)
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
p = Period('NaT', freq='W')
self.assertTrue(p.start_time is tslib.NaT)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
xp = _ex(2012, 1, 2)
p = Period('2012', freq='D')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 1, 1)
p = Period('2012', freq='H')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 3)
self.assertEqual(Period('2012', freq='B').end_time, xp)
xp = _ex(2012, 1, 2)
self.assertEqual(Period('2012', freq='W').end_time, xp)
p = Period('NaT', freq='W')
self.assertTrue(p.end_time is tslib.NaT)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
assert_equal(Period(freq='W', year=2012, month=2, day=1).days_in_month, 29)
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
with tm.assert_produces_warning(FutureWarning):
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
with tm.assert_produces_warning(FutureWarning):
exp = Period(freq='WK', year=2012, month=2, day=1)
assert_equal(exp.days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.dayofyear, 1)
assert_equal(b_date.days_in_month, 31)
assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.dayofyear, 1)
assert_equal(d_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.dayofyear, 1)
assert_equal(h_date.hour, 0)
assert_equal(h_date.days_in_month, 31)
assert_equal(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.dayofyear, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(t_date.days_in_month, 31)
        assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0,
                            minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
                        hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.dayofyear, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
assert_equal(s_date.days_in_month, 31)
        assert_equal(Period(freq='S', year=2012, month=2, day=1, hour=0,
                            minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
# confirm Period('NaT') work identical with Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
val2 = period.pnow('2D')
exp2 = Period(dt, freq='2D')
self.assertEqual(val2, exp2)
self.assertEqual(val.ordinal, val2.ordinal)
self.assertEqual(val.ordinal, exp2.ordinal)
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
self.assertEqual(Period(year=2007, month=1, freq='2M'), expected)
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertRaises(ValueError, Period)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M'))
self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S")
tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS')
self.assertTrue(_period_code_map.get("MS") is None)
def noWrap(item):
return item
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('W'), ival_W)
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
with tm.assert_produces_warning(FutureWarning):
ival_W = Period(freq='WK', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
with tm.assert_produces_warning(FutureWarning):
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
with tm.assert_produces_warning(FutureWarning):
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
with tm.assert_produces_warning(FutureWarning):
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
with tm.assert_produces_warning(FutureWarning):
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
with tm.assert_produces_warning(FutureWarning):
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
with tm.assert_produces_warning(FutureWarning):
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
with tm.assert_produces_warning(FutureWarning):
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('W'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('W'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('W'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('W'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
def test_asfreq_nat(self):
p = Period('NaT', freq='A')
result = p.asfreq('M')
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# mult freq to normal freq
p = Period(freq='3A', year=2007)
# ordinal will change because how=E is the default
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period('2009', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2007-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='3A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2009-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
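# Summary of the multiplied-frequency conversions above: for a span frequency
# such as '3A', `asfreq` keeps the start ordinal when how='S' and moves to the
# end of the span under the default how='E', e.g.:
#   Period('2007', freq='3A').asfreq('A')           -> Period('2009', 'A')
#   Period('2007', freq='3A').asfreq('A', how='S')  -> Period('2007', 'A')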
def test_asfreq_mult_nat(self):
# normal freq to mult freq
for p in [Period('NaT', freq='A'), Period('NaT', freq='3A'),
Period('NaT', freq='2M'), Period('NaT', freq='3D')]:
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
result = p.asfreq(freq, how='S')
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assertIsInstance(series, Series)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_numpy_array_equal(result, idx.values)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assertTrue(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assertTrue(index.equals(expected))
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
self.assertTrue(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assertTrue(idx.equals(exp))
def test_constructor_U(self):
# 'U' was formerly used as an undefined period
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assertTrue(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx)
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq=offsets.MonthEnd())
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, 'M')
result = PeriodIndex(idx, freq='2M')
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, '2M')
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, '2M')
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assertTrue(result.equals(exp))
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=20, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
def test_constructor_nat(self):
self.assertRaises(
ValueError, period_range, start='NaT', end='2011-01-01', freq='M')
self.assertRaises(
ValueError, period_range, start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
# PeriodIndex and period_range should construct identical results
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11',
'2014-01-14'], freq='D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
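# Sketch of the error behaviour checked above: a non-positive multiple is not a
# valid span, so both the PeriodIndex constructor and period_range raise, e.g.:
#   PeriodIndex(['2011-01'], freq='-1M')            # ValueError: Frequency must be positive ... -1M
#   period_range('2011-01', periods=3, freq='0M')   # ValueError: ... 0M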
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freq)
tm.assert_index_equal(pidx, expected)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assertIsInstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(
KeyError, "left slice bound for non-unique label: '2008'",
ts.__getitem__, slice('2008', '2009'))
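# Partial-string indexing as exercised above: with a monthly PeriodIndex, a bare
# year or year-month label selects every contained period, e.g. for the series
# starting at 2007-01 with 50 monthly points:
#   ts['2008']          -> the 12 observations of 2008
#   ts['2008':'2009']   -> 24 observations (label slices include both endpoints)
# Label-slicing a non-unique, unsorted index raises KeyError instead.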
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
self.assertTrue(Period('2007-01', freq='M') in rng)
self.assertFalse(Period('2007-01', freq='D') in rng)
self.assertFalse(Period('2007-01', freq='2M') in rng)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assertTrue(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assertIsInstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assertTrue(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
zs=[Timestamp('99-04-17 00:00:00',tz='UTC'),
Timestamp('2001-04-17 00:00:00',tz='UTC'),
Timestamp('2001-04-17 00:00:00',tz='America/Los_Angeles'),
Timestamp('2001-04-17 00:00:00',tz=None)]
for z in zs:
self.assertEqual( eval(repr(z)), z)
def test_to_timestamp_pi_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
self.assertTrue(result2.equals(index))
self.assertEqual(result2.name, 'idx')
result3 = result.to_period(freq='3M')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx')
self.assert_index_equal(result3, exp)
self.assertEqual(result3.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -2A')
with tm.assertRaisesRegexp(ValueError, msg):
result.to_period(freq='-2A')
def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx')
result = idx.to_timestamp()
expected = DatetimeIndex(['2011-01-01', 'NaT', '2011-02-01'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E')
expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], name='idx')
self.assert_index_equal(result, expected)
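# The '2M' case above shows how `to_timestamp` resolves a multiplied frequency:
# the default how='S' stamps the start of each 2-month span, while how='E'
# stamps its end, e.g.:
#   PeriodIndex(['2011-01'], freq='2M').to_timestamp()          -> ['2011-01-01']
#   PeriodIndex(['2011-01'], freq='2M').to_timestamp(how='E')   -> ['2011-02-28']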
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
# GH #1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assertIsInstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.set_index(idx2)
self.assertTrue(df.index.equals(idx2))
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
self.assertTrue(result.index.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
self.assertTrue(result.columns.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
# invalid axis
assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
self.assertTrue(isinstance(result1.columns, DatetimeIndex))
self.assertTrue(isinstance(result2.columns, DatetimeIndex))
self.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
self.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always uses 'infer'
self.assertEqual(result1.columns.freqstr, 'AS-JAN')
self.assertEqual(result2.columns.freqstr, 'AS-JAN')
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
self.assertTrue(pi1.shift(0).equals(pi1))
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
result1 = pi1.asfreq('3M')
result2 = pi1.asfreq('M')
expected = PeriodIndex(freq='M', start='2001-12', end='2001-12')
self.assert_numpy_array_equal(result1.asi8, expected.asi8)
self.assertEqual(result1.freqstr, '3M')
self.assert_numpy_array_equal(result2.asi8, expected.asi8)
self.assertEqual(result2.freqstr, 'M')
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
self.assertTrue(result.equals(expected))
def test_asfreq_mult_pi(self):
pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M')
for freq in ['D', '3D']:
result = pi.asfreq(freq)
exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT',
'2001-04-30'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
result = pi.asfreq(freq, how='S')
exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT',
'2001-03-01'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(exp_index))
self.assertTrue(df_result.index.equals(exp_index))
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(index.asfreq('D', how='start')))
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '0', 'A')
def test_negative_ordinals(self):
p = Period(ordinal=-1000, freq='A')
p = Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_numpy_array_equal(idx1,idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
pi3 = dti.to_period(freq='3D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi3[0], Period('1/31/2005', freq='3D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
self.assertEqual(pi3[-1], Period('11/30/2005', freq='3D'))
tm.assert_index_equal(pi1, period_range('1/1/2005', '11/1/2005', freq='M'))
tm.assert_index_equal(pi2, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('D'))
tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('3D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
# Confirm DatetimeIndex and PeriodIndex work identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01'], s[0:31])
assert_series_equal(s['2013/02'], s[31:59])
assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/02':], s[1:])
assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
assert_series_equal(s['2013/02':], s[31:])
assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(IndexError):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660])
assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960])
assert_series_equal(s['2013/01/01 10H':], s[3600:])
assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D')
self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012]))
self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4]))
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# TODO: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
self.assertTrue(pi.to_timestamp().equals(dti))
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
self.assertTrue(pi.to_timestamp().equals(dti))
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
self.assertTrue(rng.equals(result))
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rng = date_range('01-Jan-2012', periods=8, freq='EOM')
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
def test_multiples(self):
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
self.assertEqual(result1.ordinal, result2.ordinal)
self.assertEqual(result1.freqstr, '2A-DEC')
self.assertEqual(result2.freqstr, 'A-DEC')
self.assertEqual(result1.freq, offsets.YearEnd(2))
self.assertEqual(result2.freq, offsets.YearEnd())
self.assertEqual((result1 + 1).ordinal, result1.ordinal + 2)
self.assertEqual((result1 - 1).ordinal, result2.ordinal - 2)
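# The multiples test above documents the arithmetic convention: a Period with a
# multiplied frequency shares its ordinal with the plain frequency, but each
# +1/-1 step advances by the full span, e.g.:
#   p = Period('1989', freq='2A')
#   (p + 1).ordinal == p.ordinal + 2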
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assertIsInstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assertIsInstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with assertRaisesRegexp(ValueError, msg):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
self.assertTrue(result.equals(index))
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
self.assertTrue(result.equals(index))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.union, index2)
self.assertRaises(ValueError, index.join, index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.join, index3)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
self.assertTrue(result.equals(index[10:-5]))
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
self.assertTrue(result.equals(index[10:-5]))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.intersection, index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.intersection, index3)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'weekofyear', 'week', 'dayofweek',
'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert_equal(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
assert_equal(getattr(x, field), val)
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
self.assertTrue(result.equals(expected))
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
tm.assert_numpy_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if compat.PY3:
# unicode
types += compat.text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assertIsInstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
tm.assert_numpy_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assertIsInstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assertIsInstance(s.index.levels[0], PeriodIndex)
tm.assertIsInstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
tm.assertIsInstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
self.assertEqual(new_prng.freq, offsets.MonthEnd())
self.assertEqual(new_prng.freqstr, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
self.assertTrue(idx.equals(org))
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M')
# check for consistency with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
for freq in ['D', '2D']:
pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03',
'2014-01-04', '2014-01-05'], freq=freq)
p1 = pd.Period('2014-01-01', freq=freq)
self.assertEqual(pidx.searchsorted(p1), 0)
p2 = pd.Period('2014-01-04', freq=freq)
self.assertEqual(pidx.searchsorted(p2), 3)
msg = "Input has different freq=H from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='H'))
msg = "Input has different freq=5D from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='5D'))
def test_round_trip(self):
p = Period('2000Q1')
new_p = self.round_trip_pickle(p)
self.assertEqual(new_p, p)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestMethods(tm.TestCase):
"Base test class for MaskedArrays."
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert_equal(dt1 + 1, dt2)
#
# GH 4731
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_add_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq=freq))
self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq=freq))
self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq=freq))
self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq=freq))
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq=freq))
self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq=freq))
self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq=freq))
self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq=freq))
self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq=freq))
self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_add_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq=freq))
self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq=freq))
self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq=freq))
self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq=freq))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq=freq))
self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq=freq))
self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq=freq))
self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq=freq))
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq=freq))
self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertEqual((p - 1).ordinal, tslib.iNaT)
self.assertEqual((p - Period('2011-01', freq=freq)).ordinal, tslib.iNaT)
self.assertEqual((Period('2011-01', freq=freq) - p).ordinal, tslib.iNaT)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + 2
expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
result2 = result - 2
self.assertTrue(result2.equals(idx))
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
def test_pi_ops_array(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.add(idx, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.subtract(idx, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
# incompatible freq
msg = "Input has different freq from PeriodIndex\(freq=M\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 'D')] * 4)
idx = PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', 'NaT',
'2011-01-01 12:00'], freq='H', name='idx')
result = idx + np.array([np.timedelta64(1, 'D')] * 4)
exp = PeriodIndex(['2011-01-02 09:00', '2011-01-02 10:00', 'NaT',
'2011-01-02 12:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([np.timedelta64(1, 'h')] * 4)
exp = PeriodIndex(['2011-01-01 08:00', '2011-01-01 09:00', 'NaT',
'2011-01-01 11:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
msg = "Input has different freq from PeriodIndex\(freq=H\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 's')] * 4)
idx = PeriodIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', 'NaT',
'2011-01-01 12:00:00'], freq='S', name='idx')
result = idx + np.array([np.timedelta64(1, 'h'), np.timedelta64(30, 's'),
np.timedelta64(2, 'h'), np.timedelta64(15, 'm')])
exp = PeriodIndex(['2011-01-01 10:00:00', '2011-01-01 10:00:30', 'NaT',
'2011-01-01 12:15:00'], freq='S', name='idx')
self.assert_index_equal(result, exp)
class TestPeriodRepresentation(tm.TestCase):
"""
Period ordinals should match NumPy's epoch-based units: ordinal 0 corresponds to the start of 1970 for every frequency.
"""
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng.values, exp)
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
# confirm Period('NaT') behaves identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat),
(nat, t), (t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_pi_comp(self):
for freq in ['M', '2M', '3M']:
base = PeriodIndex(['2011-01', '2011-02',
'2011-03', '2011-04'], freq=freq)
p = Period('2011-02', freq=freq)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base == p, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base != p, exp)
exp = np.array([False, False, True, True])
self.assert_numpy_array_equal(base > p, exp)
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(base < p, exp)
exp = np.array([False, True, True, True])
self.assert_numpy_array_equal(base >= p, exp)
exp = np.array([True, True, False, False])
self.assert_numpy_array_equal(base <= p, exp)
idx = PeriodIndex(['2011-02', '2011-01', '2011-03', '2011-05'], freq=freq)
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(base == idx, exp)
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(base != idx, exp)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base > idx, exp)
exp = np.array([True, False, False, True])
self.assert_numpy_array_equal(base < idx, exp)
exp = np.array([False, True, True, False])
self.assert_numpy_array_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base <= idx, exp)
# different base freq
msg = "Input has different freq=A-DEC from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A')
base <= idx
# different mult
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='4M')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M')
base <= idx
def test_pi_nat_comp(self):
for freq in ['M', '2M', '3M']:
idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq)
result = idx1 > Period('2011-02', freq=freq)
exp = np.array([False, False, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == Period('NaT', freq=freq)
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != Period('NaT', freq=freq)
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(result, exp)
diff = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='4M')
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
idx1 > diff
with tm.assertRaisesRegexp(ValueError, msg):
idx1 == diff
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
sergpolly/Thermal_adapt_scripts | retrieve_essential_genbanks.py | 1 | 2578 | import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio.Alphabet import IUPAC
import numpy as np
import pandas as pd
# path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER/ftp.ncbi.nih.gov/refseq/release/bacteria/genbank')
path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
path_nucs = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/nucfasta')
print "Loading original databases and description file ..."
# summary file parsing ...
summary_fname = "env_catalog_compgenome.dat"
summary = pd.read_csv(os.path.join(path,summary_fname))
# genbank DB ...
gbdb_fname = os.path.join(path,'genbank.idx')
gbdb = SeqIO.index_db(gbdb_fname)
# nucleotides fasta files ...
fndb_fname = os.path.join(path_nucs,'nucfasta.idx')
fndb = SeqIO.index_db(fndb_fname,alphabet=IUPAC.IUPACAmbiguousDNA(),key_function=(lambda name: name.split('|')[3]))
# gbdb and fndb keys must be the same ...
print "Original databases are loaded ..."
print
print "Output the reduced genbank with the required entries only ..."
gb_small_fname = "tmp_gbank.gb"
with open(os.path.join(path,gb_small_fname),"w") as fp:
for idx in summary['GenomicID']:
fp.write(gbdb.get_raw(idx))
# index in-memory style the smaller subset of the genbank database ...
gbdb_small = SeqIO.index(os.path.join(path,gb_small_fname),"genbank")
# records = SeqIO.to_dict(SeqIO.parse("Quality/example.fastq", "fastq"))
# to_dict might be faster, since we are parsing all of the records anyway ...
# a separate tmp file for the nucfasta records might also speed things up
# worth trying sometime; so far this step takes ~40 minutes
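# A hedged sketch of that alternative (untested here), reusing the names defined above:
# gbdb_small = SeqIO.to_dict(SeqIO.parse(os.path.join(path, gb_small_fname), "genbank"))
# this loads every record into memory at once instead of lazily indexing the file.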
print "tmp file is written and indexed ..."
print
print "Matching nucleotide sequences from fasta with corresponding SeqRecords in genbank ..."
sequences = []
for idx in summary['GenomicID']:
seqrec = gbdb_small[idx]
nuc_seq = fndb[idx].seq
# next thing should never happen in practice ...
if ( len(seqrec.seq)!=len(nuc_seq) ):
print "Genome length doesn't match between fasta and genbank for %s record"%idx
print "Proceed anyways ..."
# give the genbank, its actual nucleotide sequence ...
seqrec.seq = nuc_seq
sequences.append(seqrec)
print "All set"
print
print "Writing final genbank ..."
# size_seqs = sys.getsizeof(sequences)
# print "All interesting sequences are in memory! %d byte of interesting sequences."%size_seqs
print "Writing all of that to the condensed.gb file"
with open("condensed.raw.gb","w") as fp:
SeqIO.write(sequences,fp,"genbank")
| mit |
shangwuhencc/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 57 | 8062 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
ctogle/dilapidator | src/dilap/core/plotting.py | 1 | 5799 | from dilap.geometry.vec3 import vec3
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pdb
###############################################################################
# create a mpl 2d axes object
def plot_axes_xy(x = 5,o = (0,0),f = None,aspect = None):
if f is None:ax = plt.figure().add_subplot(111)
else:ax = f.add_subplot(111)
ax.set_xlim([-x+o[0],x+o[0]])
ax.set_ylim([-x+o[1],x+o[1]])
if aspect == 'equal':ax.set_aspect('equal')
return ax
# create a mpl 3d axes object
def plot_axes(x = 5,f = None):
if f is None:ax = plt.figure().add_subplot(111,projection = '3d')
else:ax = f.add_subplot(111,projection = '3d')
ax.set_xlim([-x,x])
ax.set_ylim([-x,x])
ax.set_zlim([-(9.0/16.0)*x,(9.0/16.0)*x])
return ax
def plot_point_xy(pt,ax,mk = 'o',col = None):
if col is None:col = 'black'
ax.plot([pt.x],[pt.y],marker = mk,color = col)
return ax
def plot_vector_xy(pt,tn,ax,mk = 'o',lw = 1.0,col = None):
tip = pt.cp().trn(tn)
ax = plot_point_xy(pt,ax,mk = mk,col = col)
ax = plot_edges_xy([pt,tip],ax,mk = mk,lw = lw,col = col)
ax = plot_point_xy(tip,ax,mk = 'd',col = col)
return ax
def plot_point_xy_annotate(pt,ax,text):
ax.annotate(text,xy = (pt.x,pt.y),xytext = (-2, 10),
textcoords = 'offset points',ha = 'right',va = 'bottom',
arrowprops = dict(arrowstyle = '->',
connectionstyle = 'arc3,rad=0'))
return ax
def plot_point(pt,ax,mk = 'o',col = None):
if col is None:col = 'black'
ax.plot([pt.x],[pt.y],zs = [pt.z],marker = mk,color = col)
return ax
def plot_points_xy(points,ax = None,ms = None,cs = None,number = False):
if ax is None:ax = plot_axes_xy()
if ms is None:ms = ['o']*len(points)
if cs is None:cs = [None]*len(points)
for pdx in range(len(points)):
plot_point_xy(points[pdx],ax,ms[pdx],cs[pdx])
if number:plot_point_xy_annotate(points[pdx],ax,str(pdx+1))
return ax
def plot_points(points,ax = None,ms = None,cs = None,marker = None):
if ax is None:ax = plot_axes()
if marker is None:marker = 'o'
if ms is None:ms = [marker]*len(points)
if cs is None:cs = [None]*len(points)
for pdx in range(len(points)):plot_point(points[pdx],ax,ms[pdx],cs[pdx])
return ax
def plot_edges_xy(points,ax = None,mk = None,lw = 1.0,ls = '-',center = False,col = None):
if ax is None:ax = plot_axes_xy()
if mk is None:mk = '+'
if col is None:col = 'black'
pts = [p.__iter__() for p in points]
xs,ys,zs = zip(*pts)
ax.plot(xs,ys,marker = mk,lw = lw,ls = ls,color = col)
if center:
centers = [points[x-1].mid(points[x]) for x in range(1,len(points))]
plot_points_xy(centers,ax)
return ax
def plot_edges(points,ax = None,mk = None,lw = 1.0,ls = '-',center = False,col = None):
if ax is None:ax = plot_axes()
if mk is None:mk = '+'
if col is None:col = 'black'
pts = [p.__iter__() for p in points]
xs,ys,zs = zip(*pts)
ax.plot(xs,ys,zs,marker = mk,lw = lw,ls = ls,color = col)
if center:
centers = [points[x-1].mid(points[x]) for x in range(1,len(points))]
plot_points(centers,ax)
return ax
def plot_polygon_xy(points,ax = None,center = False,mk = None,lw = 1.0,ls = '-',col = None):
epts = list(points[:])
epts.append(points[0])
ax = plot_edges_xy(epts,ax,mk = mk,lw = lw,ls = ls,col = col)
if center:plot_point_xy(vec3(0,0,0).com(points),ax,mk = 's',col = col)
return ax
def plot_polygon(points,ax = None,center = False,mk = None,lw = 1.0,ls = '-',col = None):
epts = list(points[:])
epts.append(points[0])
ax = plot_edges(epts,ax,mk = mk,lw = lw,ls = ls,col = col)
if center:plot_point(vec3(0,0,0).com(points),ax,mk = 's',col = col)
return ax
def plot_polygon_full_xy(poly,ax = None,center = False,lw = 1.0,ls = '-',col = None):
if ax is None:ax = plot_axes_xy()
ebnd,ibnds = poly
plot_polygon_xy(list(ebnd),ax,center = True,lw = lw,ls = ls,col = col)
for ib in ibnds:plot_polygon_xy(list(ib),ax,center = True,lw = lw,ls = ls,col = col)
return ax
def plot_polygon_full(poly,ax = None,center = False,lw = 1.0,col = None):
if ax is None:ax = plot_axes()
ebnd,ibnds = poly
plot_polygon(list(ebnd),ax,center = True,lw = lw,col = col)
for ib in ibnds:plot_polygon(list(ib),ax,center = True,lw = lw,col = col)
return ax
def plot_tetrahedron(points,ax = None):
raise NotImplemented
def plot_line_xy(l1,l2,r = 25,ax = None,center = False,lw = 1.0,col = None):
ltan = l1.tov(l2).cpxy()
l1far = l1.cp().trn(ltan.cp().uscl(-r))
l2far = l2.cp().trn(ltan.cp().uscl( r))
ax = plot_edges_xy([l1far,l2far],ax = ax,center = center,lw = lw,col = col)
return ax
def plot_line(l1,l2,r = 25,ax = None,center = False,lw = 1.0,col = None):
ltan = l1.tov(l2)
l1far = l1.cp().trn(ltan.cp().uscl(-r))
l2far = l2.cp().trn(ltan.cp().uscl( r))
ax = plot_edges([l1far,l2far],ax = ax,center = center,lw = lw,col = col)
return ax
def plot_circle_xy(c,r,ax = None,center = False,lw = 1.0,col = None):
circ = c.pring(r,32)
ax = plot_polygon_xy(circ,ax,center,lw,col)
return ax
def plot_circle(c,r,ax = None,center = False,lw = 1.0,col = None):
circ = c.pring(r,32)
ax = plot_polygon(circ,ax,center,lw,col)
return ax
def plot_ray_xy(r,ax = None,col = None):
ax = plot_point_xy(r.o,ax,mk = 's')
ax = plot_edges_xy((r.o,r.o.cp().trn(r.d.cp().scl(100))),ax,lw = 2.0,col = col)
return ax
def plot_ray(r,ax = None,col = None):
ax = plot_point(r.o,ax,mk = 's')
ax = plot_edges((r.o,r.o.cp().trn(r.d.cp().scl(100))),ax,lw = 2.0,col = col)
return ax
###############################################################################
| mit |
nchikkam/tdd | code/msp.py | 1 | 1520 | import numpy as np
from scipy.spatial.distance import pdist, squareform
import matplotlib.pyplot as plt
def minimum_spanning_tree(X, copy_X=True):
"""X are edge weights of fully connected graph"""
if copy_X:
X = X.copy()
if X.shape[0] != X.shape[1]:
raise ValueError("X needs to be square matrix of edge weights")
n_vertices = X.shape[0]
spanning_edges = []
# initialize with node 0:
visited_vertices = [0]
num_visited = 1
# exclude self connections:
diag_indices = np.arange(n_vertices)
X[diag_indices, diag_indices] = np.inf
while num_visited != n_vertices:
new_edge = np.argmin(X[visited_vertices], axis=None)
# 2d encoding of new_edge from flat, get correct indices
new_edge = divmod(new_edge, n_vertices)
new_edge = [visited_vertices[new_edge[0]], new_edge[1]]
# add edge to tree
spanning_edges.append(new_edge)
visited_vertices.append(new_edge[1])
# remove all edges inside current tree
X[visited_vertices, new_edge[1]] = np.inf
X[new_edge[1], visited_vertices] = np.inf
num_visited += 1
return np.vstack(spanning_edges)
def test_mst():
P = np.random.uniform(size=(50, 2))
X = squareform(pdist(P))
edge_list = minimum_spanning_tree(X)
plt.scatter(P[:, 0], P[:, 1])
for edge in edge_list:
i, j = edge
plt.plot([P[i, 0], P[j, 0]], [P[i, 1], P[j, 1]], c='r')
plt.show()
if __name__ == "__main__":
test_mst() | mit |
anntzer/scikit-learn | benchmarks/bench_plot_ward.py | 14 | 1277 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
UNR-AERIAL/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
joshzarrabi/e-mission-server | emission/storage/timeseries/builtin_timeseries.py | 1 | 5212 | import logging
import pandas as pd
import pymongo
import emission.core.get_database as edb
import emission.storage.timeseries.abstract_timeseries as esta
class BuiltinTimeSeries(esta.TimeSeries):
def __init__(self, user_id):
super(BuiltinTimeSeries, self).__init__(user_id)
self.key_query = lambda(key): {"metadata.key": key}
self.type_query = lambda(entry_type): {"metadata.type": entry_type}
self.timeseries_db = edb.get_timeseries_db()
@staticmethod
def get_uuid_list():
return edb.get_timeseries_db().distinct("user_id")
@staticmethod
def ts_query(tq):
time_key = "metadata.%s" % tq.timeType
ret_query = {time_key : {"$lt": tq.endTs}}
if (tq.startTs is not None):
ret_query[time_key].update({"$gte": tq.startTs})
return ret_query
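# For example (derived directly from the code above): a time_query with
# timeType="write_ts", startTs=5 and endTs=10 yields
# {"metadata.write_ts": {"$lt": 10, "$gte": 5}}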
def _get_query(self, key_list = None, time_query = None):
ret_query = {'user_id': self.user_id} # UUID is mandatory
if key_list is not None and len(key_list) > 0:
key_query_list = []
for key in key_list:
key_query_list.append(self.key_query(key))
ret_query.update({"$or": key_query_list})
if time_query is not None:
ret_query.update(self.ts_query(time_query))
return ret_query
@staticmethod
def _get_sort_key(time_query = None):
if time_query is None:
return "metadata.write_ts"
else:
return "metadata.%s" % time_query.timeType
@staticmethod
def _to_df_entry(entry):
ret_val = entry["data"]
ret_val["_id"] = entry["_id"]
ret_val["metadata_write_ts"] = entry["metadata"]["write_ts"]
# logging.debug("ret_val = %s " % ret_val)
return ret_val
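# For example (derived directly from the code above): an entry of the form
# {"_id": oid, "data": {"lat": 1, "lon": 2}, "metadata": {"write_ts": 123}}
# is flattened to {"lat": 1, "lon": 2, "_id": oid, "metadata_write_ts": 123},
# which becomes one row of the DataFrame built in get_data_df below.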
def find_entries(self, key_list = None, time_query = None):
sort_key = self._get_sort_key(time_query)
logging.debug("curr_query = %s, sort_key = %s" %
(self._get_query(key_list, time_query), sort_key))
return self.timeseries_db.find(self._get_query(key_list, time_query)).sort(sort_key, pymongo.ASCENDING)
def get_entry_at_ts(self, key, ts_key, ts):
return self.timeseries_db.find_one({"user_id": self.user_id,
"metadata.key": key,
ts_key: ts})
def get_data_df(self, key, time_query = None):
sort_key = self._get_sort_key(time_query)
logging.debug("curr_query = %s, sort_key = %s" % (self._get_query([key], time_query), sort_key))
result_it = self.timeseries_db.find(self._get_query([key], time_query), {"data": True,
"metadata.write_ts": True}).sort(sort_key, pymongo.ASCENDING)
logging.debug("Found %s results" % result_it.count())
# Dataframe doesn't like to work off an iterator - it wants everything in memory
return pd.DataFrame([BuiltinTimeSeries._to_df_entry(e) for e in list(result_it)])
def get_max_value_for_field(self, key, field, time_query=None):
"""
Currently used to get the max value of the location entries so that we can send
data that actually exists into the usercache. This may be too narrow a use case,
in which case we may want to do this in some other way.
:param key: the metadata key for the entries, used to identify the stream
:param field: the field in the stream whose max value we want.
:param time_query: the time range in which to search the stream
It is assumed that the values for the field are sortable.
:return: the max value for the field in the stream identified by key. -1 if there are no entries for the key.
"""
result_it = self.timeseries_db.find(self._get_query([key], time_query),
{"_id": False, field: True}).sort(field, pymongo.DESCENDING).limit(1)
if result_it.count() == 0:
return -1
retVal = list(result_it)[0]
field_parts = field.split(".")
for part in field_parts:
retVal = retVal[part]
return retVal
def insert(self, entry):
"""
"""
logging.debug("insert called")
if "user_id" not in entry:
entry["user_id"] = self.user_id
elif entry["user_id"] != self.user_id:
raise AttributeError("Saving entry for %s in timeseries for %s" % (entry["user_id"], self.user_id))
else:
logging.debug("entry was fine, no need to fix it")
logging.debug("Inserting entry %s into timeseries" % entry)
self.timeseries_db.insert(entry)
def insert_error(self, entry):
"""
"""
logging.debug("insert_error called")
if "user_id" not in entry:
entry["user_id"] = self.user_id
elif entry["user_id"] != self.user_id:
raise AttributeError("Saving entry for %s in timeseries for %s" % (entry["user_id"], self.user_id))
else:
logging.debug("entry was fine, no need to fix it")
logging.debug("Inserting entry %s into error timeseries" % entry)
edb.get_timeseries_error_db().insert(entry)
| bsd-3-clause |
rishikksh20/scikit-learn | benchmarks/bench_tree.py | 131 | 3647 | """
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import matplotlib.pyplot as plt
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
plt.figure('scikit-learn tree benchmark results')
plt.subplot(211)
plt.title('Learning with varying number of samples')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
plt.subplot(212)
plt.title('Learning in high dimensional spaces')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of dimensions')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
diogo149/CauseEffectPairsPaper | configs/categorical_kmeans10_none.py | 1 | 7424 | import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in the number of features growing faster than linearly
"""
AGGREGATORS = [
to_aggregator("max"),
to_aggregator("min"),
to_aggregator("median"),
to_aggregator("mode"),
to_aggregator("mean"),
# to_aggregator("sum"),
]
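# A rough illustration of steps 1-4 above (not autocause's actual code path, and
# assuming each aggregator is simply callable on a list of per-column values):
# one_hot = LabelBinarizer().fit_transform(categorical_column)    # step 1: one column per category
# per_column = [np.std(col) for col in one_hot.T]                 # steps 2-3: numerical feature per column
# combined = [agg(per_column) for agg in AGGREGATORS]             # step 4: one combined feature per aggregator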
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = False
CONVERT_TO_CATEGORICAL = True
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=NUMERICAL_TO_NUMERICAL["identity"],
B=BINARY_TO_NUMERICAL["identity"],
C=CATEGORICAL_TO_NUMERICAL["binarize"],
)
CATEGORICAL_CONVERTERS = dict(
N=NUMERICAL_TO_CATEGORICAL["kmeans10"],
B=BINARY_TO_CATEGORICAL["identity"],
C=CATEGORICAL_TO_CATEGORICAL["identity"],
)
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converts can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = True
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
Ridge(),
LinearRegression(),
DecisionTreeRegressor(random_state=0),
RandomForestRegressor(random_state=0),
GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
# LogisticRegression(random_state=0),
# DecisionTreeClassifier(random_state=0),
# RandomForestClassifier(random_state=0),
# GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
# KNeighborsClassifier(),
# GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
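# Illustrative reading of the note above (the exact label convention is an
# assumption, not taken from this config): with REFLECT_DATA = True and N
# labelled pairs, pair i also appears as observation N + i with A and B swapped,
# and a label meaning "A causes B" has to be flipped for that reflected row.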
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
| mit |
zak-k/cartopy | lib/cartopy/crs.py | 1 | 70157 | # (C) British Crown Copyright 2011 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
The crs module defines Coordinate Reference Systems and the transformations
between them.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractproperty
import math
import warnings
import numpy as np
import shapely.geometry as sgeom
from shapely.prepared import prep
import six
from cartopy._crs import CRS, Geocentric, Geodetic, Globe, PROJ4_VERSION
import cartopy.trace
__document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe']
WGS84_SEMIMAJOR_AXIS = 6378137.0
WGS84_SEMIMINOR_AXIS = 6356752.3142
class RotatedGeodetic(CRS):
"""
Defines a rotated latitude/longitude coordinate system with spherical
topology and geographical distance.
Coordinates are measured in degrees.
"""
def __init__(self, pole_longitude, pole_latitude,
central_rotated_longitude=0.0, globe=None):
"""
Create a RotatedGeodetic CRS.
The class uses proj4 to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
globe, then rotating this new CRS about it's pole using the
central_rotated_longitude value.
Args:
* pole_longitude - Pole longitude position, in unrotated degrees.
* pole_latitude - Pole latitude position, in unrotated degrees.
* central_rotated_longitude - Longitude rotation about the new
pole, in degrees.
Kwargs:
* globe - An optional :class:`cartopy.crs.Globe`.
Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
globe = globe or Globe(datum='WGS84')
super(RotatedGeodetic, self).__init__(proj4_params, globe=globe)
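# A minimal usage sketch (illustrative only; the pole position is arbitrary):
# rotated = RotatedGeodetic(pole_longitude=177.5, pole_latitude=37.5)
# rotated.transform_point(-3.0, 52.0, Geodetic())  # geodetic lon/lat -> rotated lon/lat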
class Projection(six.with_metaclass(ABCMeta, CRS)):
"""
Defines a projected coordinate system with flat topology and Euclidean
distance.
"""
_method_map = {
'Point': '_project_point',
'LineString': '_project_line_string',
'LinearRing': '_project_linear_ring',
'Polygon': '_project_polygon',
'MultiPoint': '_project_multipoint',
'MultiLineString': '_project_multiline',
'MultiPolygon': '_project_multipolygon',
}
@abstractproperty
def boundary(self):
pass
@abstractproperty
def threshold(self):
pass
@abstractproperty
def x_limits(self):
pass
@abstractproperty
def y_limits(self):
pass
@property
def cw_boundary(self):
try:
boundary = self._cw_boundary
except AttributeError:
boundary = sgeom.LineString(self.boundary)
self._cw_boundary = boundary
return boundary
@property
def ccw_boundary(self):
try:
boundary = self._ccw_boundary
except AttributeError:
boundary = sgeom.LineString(self.boundary.coords[::-1])
self._ccw_boundary = boundary
return boundary
@property
def domain(self):
try:
domain = self._domain
except AttributeError:
domain = self._domain = sgeom.Polygon(self.boundary)
return domain
def _as_mpl_axes(self):
import cartopy.mpl.geoaxes as geoaxes
return geoaxes.GeoAxes, {'map_projection': self}
def project_geometry(self, geometry, src_crs=None):
"""
Projects the given geometry into this projection.
:param geometry: The geometry to (re-)project.
:param src_crs: The source CRS, or geodetic CRS if None.
:rtype: Shapely geometry.
If src_crs is None, the source CRS is assumed to be a geodetic
version of the target CRS.
"""
if src_crs is None:
src_crs = self.as_geodetic()
elif not isinstance(src_crs, CRS):
raise TypeError('Source CRS must be an instance of CRS'
' or one of its subclasses, or None.')
geom_type = geometry.geom_type
method_name = self._method_map.get(geom_type)
if not method_name:
raise ValueError('Unsupported geometry '
'type {!r}'.format(geom_type))
return getattr(self, method_name)(geometry, src_crs)
def _project_point(self, point, src_crs):
return sgeom.Point(*self.transform_point(point.x, point.y, src_crs))
def _project_line_string(self, geometry, src_crs):
return cartopy.trace.project_linear(geometry, src_crs, self)
def _project_linear_ring(self, linear_ring, src_crs):
"""
Projects the given LinearRing from the src_crs into this CRS and
returns a list of LinearRings and a single MultiLineString.
"""
debug = False
# 1) Resolve the initial lines into projected segments
# 1abc
# def23ghi
# jkl41
multi_line_string = cartopy.trace.project_linear(linear_ring,
src_crs, self)
# Threshold for whether a point is close enough to be the same
# point as another.
threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
# 2) Simplify the segments where appropriate.
if len(multi_line_string) > 1:
# Stitch together segments which are close to continuous.
# This is important when:
# 1) The first source point projects into the map and the
# ring has been cut by the boundary.
# Continuing the example from above this gives:
# def23ghi
# jkl41abc
# 2) The cut ends of segments are too close to reliably
# place into an order along the boundary.
line_strings = list(multi_line_string)
any_modified = False
i = 0
if debug:
first_coord = np.array([ls.coords[0] for ls in line_strings])
last_coord = np.array([ls.coords[-1] for ls in line_strings])
print('Distance matrix:')
np.set_printoptions(precision=2)
x = first_coord[:, np.newaxis, :]
y = last_coord[np.newaxis, :, :]
print(np.abs(x - y).max(axis=-1))
while i < len(line_strings):
modified = False
j = 0
while j < len(line_strings):
if i != j and np.allclose(line_strings[i].coords[0],
line_strings[j].coords[-1],
atol=threshold):
if debug:
print('Joining together {} and {}.'.format(i, j))
last_coords = list(line_strings[j].coords)
first_coords = list(line_strings[i].coords)[1:]
combo = sgeom.LineString(last_coords + first_coords)
if j < i:
i, j = j, i
del line_strings[j], line_strings[i]
line_strings.append(combo)
modified = True
any_modified = True
break
else:
j += 1
if not modified:
i += 1
if any_modified:
multi_line_string = sgeom.MultiLineString(line_strings)
# 3) Check for rings that have been created by the projection stage.
rings = []
line_strings = []
for line in multi_line_string:
if len(line.coords) > 3 and np.allclose(line.coords[0],
line.coords[-1],
atol=threshold):
result_geometry = sgeom.LinearRing(line.coords[:-1])
rings.append(result_geometry)
else:
line_strings.append(line)
# If we found any rings, then we should re-create the multi-line str.
if rings:
multi_line_string = sgeom.MultiLineString(line_strings)
return rings, multi_line_string
def _project_multipoint(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
geoms.append(self._project_point(geom, src_crs))
if geoms:
return sgeom.MultiPoint(geoms)
else:
return sgeom.MultiPoint()
def _project_multiline(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_line_string(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
return sgeom.MultiLineString(geoms)
else:
return []
def _project_multipolygon(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_polygon(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
result = sgeom.MultiPolygon(geoms)
else:
result = sgeom.MultiPolygon()
return result
def _project_polygon(self, polygon, src_crs):
"""
Returns the projected polygon(s) derived from the given polygon.
"""
# Determine orientation of polygon.
# TODO: Consider checking the internal rings have the opposite
# orientation to the external rings?
if src_crs.is_geodetic():
is_ccw = True
else:
is_ccw = polygon.exterior.is_ccw
# Project the polygon exterior/interior rings.
# Each source ring will result in either a ring, or one or more
# lines.
rings = []
multi_lines = []
for src_ring in [polygon.exterior] + list(polygon.interiors):
p_rings, p_mline = self._project_linear_ring(src_ring, src_crs)
if p_rings:
rings.extend(p_rings)
if len(p_mline) > 0:
multi_lines.append(p_mline)
# Convert any lines to rings by attaching them to the boundary.
if multi_lines:
rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw))
# Resolve all the inside vs. outside rings, and convert to the
# final MultiPolygon.
return self._rings_to_multi_polygon(rings, is_ccw)
def _attach_lines_to_boundary(self, multi_line_strings, is_ccw):
"""
Returns a list of LinearRings by attaching the ends of the given lines
to the boundary, paying attention to the traversal directions of the
lines and boundary.
"""
debug = False
debug_plot_edges = False
# Accumulate all the boundary and segment end points, along with
# their distance along the boundary.
edge_things = []
# Get the boundary as a LineString of the correct orientation
# so we can compute distances along it.
if is_ccw:
boundary = self.ccw_boundary
else:
boundary = self.cw_boundary
def boundary_distance(xy):
return boundary.project(sgeom.Point(*xy))
# Squash all the LineStrings into a single list.
line_strings = []
for multi_line_string in multi_line_strings:
line_strings.extend(multi_line_string)
# Record the positions of all the segment ends
for i, line_string in enumerate(line_strings):
first_dist = boundary_distance(line_string.coords[0])
thing = _BoundaryPoint(first_dist, False,
(i, 'first', line_string.coords[0]))
edge_things.append(thing)
last_dist = boundary_distance(line_string.coords[-1])
thing = _BoundaryPoint(last_dist, False,
(i, 'last', line_string.coords[-1]))
edge_things.append(thing)
# Record the positions of all the boundary vertices
for xy in boundary.coords[:-1]:
point = sgeom.Point(*xy)
dist = boundary.project(point)
thing = _BoundaryPoint(dist, True, point)
edge_things.append(thing)
if debug_plot_edges:
import matplotlib.pyplot as plt
current_fig = plt.gcf()
fig = plt.figure()
# Reset the current figure so we don't upset anything.
plt.figure(current_fig.number)
ax = fig.add_subplot(1, 1, 1)
# Order everything as if walking around the boundary.
# NB. We make line end-points take precedence over boundary points
# to ensure that end-points are still found and followed when they
# coincide.
edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
remaining_ls = dict(enumerate(line_strings))
prev_thing = None
for edge_thing in edge_things[:]:
if (prev_thing is not None and
not edge_thing.kind and
not prev_thing.kind and
edge_thing.data[0] == prev_thing.data[0]):
j = edge_thing.data[0]
                # Insert an edge boundary point in between this geometry.
mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
mid_point = boundary.interpolate(mid_dist)
new_thing = _BoundaryPoint(mid_dist, True, mid_point)
if debug:
print('Artificially insert boundary: {}'.format(new_thing))
ind = edge_things.index(edge_thing)
edge_things.insert(ind, new_thing)
prev_thing = None
else:
prev_thing = edge_thing
if debug:
print()
print('Edge things')
for thing in edge_things:
print(' ', thing)
if debug_plot_edges:
for thing in edge_things:
if isinstance(thing.data, sgeom.Point):
ax.plot(*thing.data.xy, marker='o')
else:
ax.plot(*thing.data[2], marker='o')
ls = line_strings[thing.data[0]]
coords = np.array(ls.coords)
ax.plot(coords[:, 0], coords[:, 1])
ax.text(coords[0, 0], coords[0, 1], thing.data[0])
ax.text(coords[-1, 0], coords[-1, 1],
'{}.'.format(thing.data[0]))
processed_ls = []
while remaining_ls:
# Rename line_string to current_ls
i, current_ls = remaining_ls.popitem()
if debug:
import sys
sys.stdout.write('+')
sys.stdout.flush()
print()
print('Processing: %s, %s' % (i, current_ls))
# We only want to consider boundary-points, the starts-and-ends of
# all other line-strings, or the start-point of the current
# line-string.
def filter_fn(t):
return (t.kind or
t.data[0] != i or
t.data[1] != 'last')
edge_things = list(filter(filter_fn, edge_things))
added_linestring = set()
while True:
# Find out how far around this linestring's last
# point is on the boundary. We will use this to find
# the next point on the boundary.
d_last = boundary_distance(current_ls.coords[-1])
if debug:
print(' d_last: {!r}'.format(d_last))
next_thing = _find_first_gt(edge_things, d_last)
# Remove this boundary point from the edge.
edge_things.remove(next_thing)
if debug:
print(' next_thing:', next_thing)
if next_thing.kind:
# We've just got a boundary point, add it, and keep going.
if debug:
print(' adding boundary point')
boundary_point = next_thing.data
combined_coords = (list(current_ls.coords) +
[(boundary_point.x, boundary_point.y)])
current_ls = sgeom.LineString(combined_coords)
elif next_thing.data[0] == i and next_thing.data[1] == 'first':
# We've gone all the way around and are now back at the
# first boundary thing.
if debug:
print(' close loop')
processed_ls.append(current_ls)
if debug_plot_edges:
coords = np.array(current_ls.coords)
ax.plot(coords[:, 0], coords[:, 1], color='black',
linestyle='--')
break
else:
if debug:
print(' adding line')
j = next_thing.data[0]
line_to_append = line_strings[j]
if j in remaining_ls:
remaining_ls.pop(j)
coords_to_append = list(line_to_append.coords)
if next_thing.data[1] == 'last':
coords_to_append = coords_to_append[::-1]
# Build up the linestring.
current_ls = sgeom.LineString((list(current_ls.coords) +
coords_to_append))
# Catch getting stuck in an infinite loop by checking that
# linestring only added once.
if j not in added_linestring:
added_linestring.add(j)
else:
if debug_plot_edges:
plt.show()
raise RuntimeError('Unidentified problem with '
'geometry, linestring being '
're-added. Please raise an issue.')
# filter out any non-valid linear rings
processed_ls = [linear_ring for linear_ring in processed_ls if
len(linear_ring.coords) > 2]
linear_rings = [sgeom.LinearRing(line) for line in processed_ls]
if debug:
print(' DONE')
return linear_rings
def _rings_to_multi_polygon(self, rings, is_ccw):
exterior_rings = []
interior_rings = []
for ring in rings:
if ring.is_ccw != is_ccw:
interior_rings.append(ring)
else:
exterior_rings.append(ring)
polygon_bits = []
# Turn all the exterior rings into polygon definitions,
# "slurping up" any interior rings they contain.
for exterior_ring in exterior_rings:
polygon = sgeom.Polygon(exterior_ring)
prep_polygon = prep(polygon)
holes = []
for interior_ring in interior_rings[:]:
if prep_polygon.contains(interior_ring):
holes.append(interior_ring)
interior_rings.remove(interior_ring)
elif polygon.crosses(interior_ring):
# Likely that we have an invalid geometry such as
# that from #509 or #537.
holes.append(interior_ring)
interior_rings.remove(interior_ring)
polygon_bits.append((exterior_ring.coords,
[ring.coords for ring in holes]))
# Any left over "interior" rings need "inverting" with respect
# to the boundary.
if interior_rings:
boundary_poly = self.domain
x3, y3, x4, y4 = boundary_poly.bounds
bx = (x4 - x3) * 0.1
by = (y4 - y3) * 0.1
x3 -= bx
y3 -= by
x4 += bx
y4 += by
for ring in interior_rings:
polygon = sgeom.Polygon(ring)
if polygon.is_valid:
x1, y1, x2, y2 = polygon.bounds
bx = (x2 - x1) * 0.1
by = (y2 - y1) * 0.1
x1 -= bx
y1 -= by
x2 += bx
y2 += by
box = sgeom.box(min(x1, x3), min(y1, y3),
max(x2, x4), max(y2, y4))
# Invert the polygon
polygon = box.difference(polygon)
# Intersect the inverted polygon with the boundary
polygon = boundary_poly.intersection(polygon)
if not polygon.is_empty:
polygon_bits.append(polygon)
if polygon_bits:
multi_poly = sgeom.MultiPolygon(polygon_bits)
else:
multi_poly = sgeom.MultiPolygon()
return multi_poly
def quick_vertices_transform(self, vertices, src_crs):
"""
Where possible, return a vertices array transformed to this CRS from
the given vertices array of shape ``(n, 2)`` and the source CRS.
.. important::
This method may return None to indicate that the vertices cannot
be transformed quickly, and a more complex geometry transformation
is required (see :meth:`cartopy.crs.Projection.project_geometry`).
"""
return_value = None
if self == src_crs:
x = vertices[:, 0]
y = vertices[:, 1]
x_limits = self.x_limits
y_limits = self.y_limits
if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
y.min() >= y_limits[0] and y.max() <= y_limits[1]):
return_value = vertices
return return_value
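# A minimal sketch of how Projection.project_geometry is typically driven
# (hypothetical helper, never called here). It uses PlateCarree, a concrete
# projection defined further below in this module; the geometry and values
# are illustrative only.
def _example_project_geometry():
    target = PlateCarree()
    # A geodetic line crossing the dateline; project_geometry cuts it at the
    # map boundary and returns projected (multi-)line geometry.
    line = sgeom.LineString([(170, 10), (-170, 10)])
    return target.project_geometry(line, Geodetic())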
class _RectangularProjection(Projection):
"""
The abstract superclass of projections with a rectangular domain which
is symmetric about the origin.
"""
def __init__(self, proj4_params, half_width, half_height, globe=None):
self._half_width = half_width
self._half_height = half_height
super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
# XXX Should this be a LinearRing?
w, h = self._half_width, self._half_height
return sgeom.LineString([(-w, -h), (-w, h), (w, h), (w, -h), (-w, -h)])
@property
def x_limits(self):
return (-self._half_width, self._half_width)
@property
def y_limits(self):
return (-self._half_height, self._half_height)
class _CylindricalProjection(_RectangularProjection):
"""
The abstract class which denotes cylindrical projections where we
want to allow x values to wrap around.
"""
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Defines a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, 2 * np.pi, n)
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords[:, ::-1]
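# A small sketch (hypothetical helper, never called) of what _ellipse_boundary
# returns: a 2xN coordinate array that the elliptical projections below turn
# into a LinearRing. The axis lengths here are arbitrary.
def _example_ellipse_boundary():
    coords = _ellipse_boundary(semimajor=2e6, semiminor=1e6, n=61)
    # coords has shape (2, 61); transposed it becomes an (N, 2) ring.
    return sgeom.polygon.LinearRing(coords.T)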
class PlateCarree(_CylindricalProjection):
def __init__(self, central_longitude=0.0, globe=None):
proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1))
a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
x_max = a_rad * 180
y_max = a_rad * 90
# Set the threshold around 0.5 if the x max is 180.
self._threshold = x_max / 360.
super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
globe=globe)
@property
def threshold(self):
return self._threshold
def _bbox_and_offset(self, other_plate_carree):
"""
Returns a pair of (xmin, xmax) pairs and an offset which can be used
for identification of whether data in ``other_plate_carree`` needs
to be transformed to wrap appropriately.
>>> import cartopy.crs as ccrs
>>> src = ccrs.PlateCarree(central_longitude=10)
>>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
>>> print(bboxes)
[[-180.0, -170.0], [-170.0, 180.0]]
>>> print(offset)
10.0
The returned values are longitudes in ``other_plate_carree``'s
coordinate system.
.. important::
The two CRSs must be identical in every way, other than their
central longitudes. No checking of this is done.
"""
self_lon_0 = self.proj4_params['lon_0']
other_lon_0 = other_plate_carree.proj4_params['lon_0']
lon_0_offset = other_lon_0 - self_lon_0
lon_lower_bound_0 = self.x_limits[0]
lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
if lon_lower_bound_1 < self.x_limits[0]:
lon_lower_bound_1 += np.diff(self.x_limits)[0]
lon_lower_bound_0, lon_lower_bound_1 = sorted(
[lon_lower_bound_0, lon_lower_bound_1])
bbox = [[lon_lower_bound_0, lon_lower_bound_1],
[lon_lower_bound_1, lon_lower_bound_0]]
bbox[1][1] += np.diff(self.x_limits)[0]
return bbox, lon_0_offset
def quick_vertices_transform(self, vertices, src_crs):
return_value = super(PlateCarree,
self).quick_vertices_transform(vertices, src_crs)
# Optimise the PlateCarree -> PlateCarree case where no
# wrapping or interpolation needs to take place.
if return_value is None and isinstance(src_crs, PlateCarree):
self_params = self.proj4_params.copy()
src_params = src_crs.proj4_params.copy()
self_params.pop('lon_0'), src_params.pop('lon_0')
xs, ys = vertices[:, 0], vertices[:, 1]
potential = (self_params == src_params and
self.y_limits[0] <= ys.min() and
self.y_limits[1] >= ys.max())
if potential:
mod = np.diff(src_crs.x_limits)[0]
bboxes, proj_offset = self._bbox_and_offset(src_crs)
x_lim = xs.min(), xs.max()
y_lim = ys.min(), ys.max()
for poly in bboxes:
# Arbitrarily choose the number of moduli to look
# above and below the -180->180 range. If data is beyond
# this range, we're not going to transform it quickly.
for i in [-1, 0, 1, 2]:
offset = mod * i - proj_offset
if ((poly[0] + offset) <= x_lim[0] and
(poly[1] + offset) >= x_lim[1]):
return_value = vertices + [[-offset, 0]]
break
if return_value is not None:
break
return return_value
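# A hedged sketch (hypothetical helper, never called) of the PlateCarree fast
# path above: vertices already in a compatible PlateCarree can be shifted by a
# longitude offset without running the full geometry projection machinery.
def _example_platecarree_quick_transform():
    src = PlateCarree(central_longitude=0)
    target = PlateCarree(central_longitude=180)
    vertices = np.array([[10.0, 0.0], [20.0, 5.0]])
    # Returns either the shifted vertices or None when the quick path cannot
    # be taken and project_geometry must be used instead.
    return target.quick_vertices_transform(vertices, src)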
class TransverseMercator(Projection):
"""
A Transverse Mercator projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
scale_factor=1.0, globe=None):
"""
Kwargs:
* central_longitude - The true longitude of the central meridian in
degrees. Defaults to 0.
* central_latitude - The true latitude of the planar origin in
degrees. Defaults to 0.
* false_easting - X offset from the planar origin in metres.
Defaults to 0.
* false_northing - Y offset from the planar origin in metres.
Defaults to 0.
* scale_factor - Scale factor at the central meridian. Defaults
to 1.
* globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
default globe is created.
"""
proj4_params = [('proj', 'tmerc'), ('lon_0', central_longitude),
('lat_0', central_latitude), ('k', scale_factor),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(TransverseMercator, self).__init__(proj4_params, globe=globe)
@property
def threshold(self):
return 1e4
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return (-2e7, 2e7)
@property
def y_limits(self):
return (-1e7, 1e7)
class OSGB(TransverseMercator):
def __init__(self):
super(OSGB, self).__init__(central_longitude=-2, central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=Globe(datum='OSGB36', ellipse='airy'))
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (0, 7e5)
@property
def y_limits(self):
return (0, 13e5)
class OSNI(TransverseMercator):
def __init__(self):
globe = Globe(semimajor_axis=6377340.189,
semiminor_axis=6356034.447938534)
super(OSNI, self).__init__(central_longitude=-8,
central_latitude=53.5,
scale_factor=1.000035,
false_easting=200000,
false_northing=250000,
globe=globe)
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (18814.9667, 386062.3293)
@property
def y_limits(self):
return (11764.8481, 464720.9559)
class UTM(Projection):
"""
Universal Transverse Mercator projection.
"""
def __init__(self, zone, southern_hemisphere=False, globe=None):
"""
Kwargs:
* zone - the numeric zone of the UTM required.
* globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
default globe is created.
* southern_hemisphere - set to True if the zone is in the southern
hemisphere, defaults to False.
"""
proj4_params = [('proj', 'utm'),
('units', 'm'),
('zone', zone)]
if southern_hemisphere:
proj4_params.append(('south', None))
super(UTM, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def threshold(self):
return 1e2
@property
def x_limits(self):
easting = 5e5
# allow 50% overflow
return (0 - easting/2, 2 * easting + easting/2)
@property
def y_limits(self):
northing = 1e7
# allow 50% overflow
return (0 - northing, 2 * northing + northing/2)
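# A minimal sketch of UTM usage (hypothetical helper, never called): zone 30
# covers 6W-0W, and southern_hemisphere toggles the false northing. The point
# chosen is illustrative.
def _example_utm():
    utm30 = UTM(zone=30)
    # Transform a geodetic point into UTM zone 30 eastings/northings (metres).
    return utm30.transform_point(-3.0, 40.0, Geodetic())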
class EuroPP(UTM):
"""
UTM Zone 32 projection for EuroPP domain.
Ellipsoid is International 1924, Datum is ED50.
"""
def __init__(self):
globe = Globe(ellipse='intl')
super(EuroPP, self).__init__(32, globe=globe)
@property
def x_limits(self):
return (-1.4e6, 2e6)
@property
def y_limits(self):
return (4e6, 7.9e6)
class Mercator(Projection):
"""
A Mercator projection.
"""
def __init__(self, central_longitude=0.0,
min_latitude=-80.0, max_latitude=84.0,
globe=None, latitude_true_scale=0.0):
"""
Kwargs:
* central_longitude - the central longitude. Defaults to 0.
* min_latitude - the maximum southerly extent of the projection.
Defaults to -80 degrees.
* max_latitude - the maximum northerly extent of the projection.
Defaults to 84 degrees.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
* latitude_true_scale - the latitude where the scale is 1.
Defaults to 0 degrees.
"""
proj4_params = [('proj', 'merc'),
('lon_0', central_longitude),
('lat_ts', latitude_true_scale),
('units', 'm')]
super(Mercator, self).__init__(proj4_params, globe=globe)
# Calculate limits.
limits = self.transform_points(Geodetic(),
np.array([-180,
180]) + central_longitude,
np.array([min_latitude, max_latitude]))
self._xlimits = tuple(limits[..., 0])
self._ylimits = tuple(limits[..., 1])
self._threshold = np.diff(self.x_limits)[0] / 720
def __eq__(self, other):
res = super(Mercator, self).__eq__(other)
if hasattr(other, "_ylimits") and hasattr(other, "_xlimits"):
res = res and self._ylimits == other._ylimits and \
self._xlimits == other._xlimits
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self._xlimits, self._ylimits))
@property
def threshold(self):
return self._threshold
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return self._xlimits
@property
def y_limits(self):
return self._ylimits
# Define a specific instance of a Mercator projection, the Google mercator.
Mercator.GOOGLE = Mercator(min_latitude=-85.0511287798066,
max_latitude=85.0511287798066,
globe=Globe(ellipse=None,
semimajor_axis=WGS84_SEMIMAJOR_AXIS,
semiminor_axis=WGS84_SEMIMAJOR_AXIS,
nadgrids='@null'))
# Deprecated form
GOOGLE_MERCATOR = Mercator.GOOGLE
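# A small sketch (hypothetical helper, never called) contrasting a standard
# Mercator with the spherical "Google" Mercator instance defined above, which
# is the convention used by most web tile services. The point is illustrative.
def _example_web_mercator():
    standard = Mercator()
    web = Mercator.GOOGLE
    lon, lat = -0.13, 51.5
    # The same geodetic point yields two different easting/northing pairs.
    return (standard.transform_point(lon, lat, Geodetic()),
            web.transform_point(lon, lat, Geodetic()))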
class LambertCylindrical(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'cea'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
super(LambertCylindrical, self).__init__(proj4_params, 180,
math.degrees(1), globe=globe)
@property
def threshold(self):
return 0.5
class LambertConformal(Projection):
"""
A Lambert Conformal conic projection.
"""
def __init__(self, central_longitude=-96.0, central_latitude=39.0,
false_easting=0.0, false_northing=0.0,
secant_latitudes=None, standard_parallels=None,
globe=None, cutoff=-30):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to -96.
* central_latitude - The central latitude. Defaults to 39.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* standard_parallels - Standard parallel latitude(s).
Defaults to (33, 45).
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
* cutoff - Latitude of map cutoff.
The map extends to infinity opposite the central pole
so we must cut off the map drawing before then.
A value of 0 will draw half the globe. Defaults to -30.
"""
proj4_params = [('proj', 'lcc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if secant_latitudes and standard_parallels:
raise TypeError('standard_parallels replaces secant_latitudes.')
elif secant_latitudes is not None:
warnings.warn('secant_latitudes has been deprecated in v0.12. '
'The standard_parallels keyword can be used as a '
'direct replacement.')
standard_parallels = secant_latitudes
elif standard_parallels is None:
# The default. Put this as a keyword arg default once
# secant_latitudes is removed completely.
standard_parallels = (33, 45)
n_parallels = len(standard_parallels)
if not 1 <= n_parallels <= 2:
raise ValueError('1 or 2 standard parallels must be specified. '
'Got {} ({})'.format(n_parallels,
standard_parallels))
proj4_params.append(('lat_1', standard_parallels[0]))
if n_parallels == 2:
proj4_params.append(('lat_2', standard_parallels[1]))
super(LambertConformal, self).__init__(proj4_params, globe=globe)
# Compute whether this projection is at the "north pole" or the
# "south pole" (after the central lon/lat have been taken into
# account).
if n_parallels == 1:
plat = 90 if standard_parallels[0] > 0 else -90
else:
# Which pole are the parallels closest to? That is the direction
# that the cone converges.
if abs(standard_parallels[0]) > abs(standard_parallels[1]):
poliest_sec = standard_parallels[0]
else:
poliest_sec = standard_parallels[1]
plat = 90 if poliest_sec > 0 else -90
self.cutoff = cutoff
n = 91
lons = [0]
lats = [plat]
lons.extend(np.linspace(central_longitude - 180 + 0.001,
central_longitude + 180 - 0.001, n))
lats.extend(np.array([cutoff] * n))
lons.append(0)
lats.append(plat)
points = self.transform_points(PlateCarree(),
np.array(lons), np.array(lats))
if plat == 90:
# Ensure clockwise
points = points[::-1, :]
self._boundary = sgeom.LineString(points)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
def __eq__(self, other):
res = super(LambertConformal, self).__eq__(other)
if hasattr(other, "cutoff"):
res = res and self.cutoff == other.cutoff
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self.cutoff))
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
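# A hedged sketch (hypothetical helper, never called) of LambertConformal with
# a single standard parallel and a southern cone: with the parallel south of
# the equator the cone converges on the south pole, so the cutoff latitude is
# given on the northern side. All parameter values are illustrative.
def _example_lambert_conformal():
    lcc = LambertConformal(central_longitude=145.0, central_latitude=-30.0,
                           standard_parallels=(-30.0,), cutoff=30)
    # The precomputed boundary reflects the cutoff chosen above.
    return lcc.boundary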
class LambertAzimuthalEqualArea(Projection):
"""
A Lambert Azimuthal Equal-Area projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* central_latitude - The central latitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'laea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
globe=globe)
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
lon, lat = central_longitude + 180, - central_latitude + 0.01
x, max_y = self.transform_point(lon, lat, PlateCarree())
coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
false_easting, false_northing, 61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
self._x_limits = self._boundary.bounds[::2]
self._y_limits = self._boundary.bounds[1::2]
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Miller(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
# XXX How can we derive the vertical limit of 131.98?
super(Miller, self).__init__(proj4_params, 180, 131.98, globe=globe)
@property
def threshold(self):
return 0.5
class RotatedPole(_CylindricalProjection):
"""
Defines a rotated latitude/longitude projected coordinate system
with cylindrical topology and projected distance.
Coordinates are measured in projection metres.
"""
def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
central_rotated_longitude=0.0, globe=None):
"""
Create a RotatedPole CRS.
The class uses proj4 to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
        globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
Args:
* pole_longitude - Pole longitude position, in unrotated degrees.
* pole_latitude - Pole latitude position, in unrotated degrees.
* central_rotated_longitude - Longitude rotation about the new
pole, in degrees.
Kwargs:
* globe - An optional :class:`cartopy.crs.Globe`.
Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)
@property
def threshold(self):
return 0.5
class Gnomonic(Projection):
def __init__(self, central_latitude=0.0,
central_longitude=0.0, globe=None):
proj4_params = [('proj', 'gnom'), ('lat_0', central_latitude),
('lon_0', central_longitude)]
super(Gnomonic, self).__init__(proj4_params, globe=globe)
self._max = 5e7
@property
def boundary(self):
return sgeom.Point(0, 0).buffer(self._max).exterior
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return (-self._max, self._max)
@property
def y_limits(self):
return (-self._max, self._max)
class Stereographic(Projection):
def __init__(self, central_latitude=0.0, central_longitude=0.0,
false_easting=0.0, false_northing=0.0,
true_scale_latitude=None, globe=None):
proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
('lon_0', central_longitude),
('x_0', false_easting), ('y_0', false_northing)]
if true_scale_latitude:
proj4_params.append(('lat_ts', true_scale_latitude))
super(Stereographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
# Note: The magic number has been picked to maintain consistent
# behaviour with a wgs84 globe. There is no guarantee that the scaling
# should even be linear.
x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
self._x_limits = (-a * x_axis_offset + false_easting,
a * x_axis_offset + false_easting)
self._y_limits = (-b * y_axis_offset + false_northing,
b * y_axis_offset + false_northing)
if self._x_limits[1] == self._y_limits[1]:
point = sgeom.Point(false_easting, false_northing)
self._boundary = point.buffer(self._x_limits[1]).exterior
else:
coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
false_easting, false_northing, 91)
self._boundary = sgeom.LinearRing(coords.T)
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class NorthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, globe=None):
super(NorthPolarStereo, self).__init__(
central_latitude=90,
central_longitude=central_longitude, globe=globe)
class SouthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, globe=None):
super(SouthPolarStereo, self).__init__(
central_latitude=-90,
central_longitude=central_longitude, globe=globe)
class Orthographic(Projection):
def __init__(self, central_longitude=0.0, central_latitude=0.0,
globe=None):
proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
('lat_0', central_latitude)]
super(Orthographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
if b != a:
warnings.warn('The proj4 "ortho" projection does not appear to '
'handle elliptical globes.')
# To stabilise the projection of geometries, we reduce the boundary by
# a tiny fraction at the cost of the extreme edges.
coords = _ellipse_boundary(a * 0.99999, b * 0.99999, n=61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
self._xlim = self._boundary.bounds[::2]
self._ylim = self._boundary.bounds[1::2]
self._threshold = np.diff(self._xlim)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._xlim
@property
def y_limits(self):
return self._ylim
class _WarpedRectangularProjection(Projection):
def __init__(self, proj4_params, central_longitude, globe=None):
super(_WarpedRectangularProjection, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
points = []
n = 91
geodetic_crs = self.as_geodetic()
for lat in np.linspace(-90, 90, n):
points.append(
self.transform_point(180 + central_longitude,
lat, geodetic_crs)
)
for lat in np.linspace(90, -90, n):
points.append(
self.transform_point(-180 + central_longitude,
lat, geodetic_crs)
)
points.append(
self.transform_point(180 + central_longitude, -90, geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
x = [p[0] for p in points]
y = [p[1] for p in points]
self._x_limits = min(x), max(x)
self._y_limits = min(y), max(y)
@property
def boundary(self):
return self._boundary
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Mollweide(_WarpedRectangularProjection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'moll'), ('lon_0', central_longitude)]
super(Mollweide, self).__init__(proj4_params, central_longitude,
globe=globe)
@property
def threshold(self):
return 1e5
class Robinson(_WarpedRectangularProjection):
def __init__(self, central_longitude=0, globe=None):
# Warn when using Robinson with proj4 4.8 due to discontinuity at
# 40 deg N introduced by incomplete fix to issue #113 (see
# https://trac.osgeo.org/proj/ticket/113).
import re
if PROJ4_VERSION != ():
if (4, 8) <= PROJ4_VERSION < (4, 9):
warnings.warn('The Robinson projection in the v4.8.x series '
'of Proj.4 contains a discontinuity at '
'40 deg latitude. Use this projection with '
'caution.')
else:
warnings.warn('Cannot determine Proj.4 version. The Robinson '
'projection may be unreliable and should be used '
'with caution.')
proj4_params = [('proj', 'robin'), ('lon_0', central_longitude)]
super(Robinson, self).__init__(proj4_params, central_longitude,
globe=globe)
@property
def threshold(self):
return 1e4
def transform_point(self, x, y, src_crs):
"""
Capture and handle any input NaNs, else invoke parent function,
:meth:`_WarpedRectangularProjection.transform_point`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
.. note::
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
"""
if np.isnan(x) or np.isnan(y):
result = (np.nan, np.nan)
else:
result = super(Robinson, self).transform_point(x, y, src_crs)
return result
def transform_points(self, src_crs, x, y, z=None):
"""
Capture and handle NaNs in input points -- else as parent function,
:meth:`_WarpedRectangularProjection.transform_points`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
.. note::
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
Instead, we invalidate any of the points that contain a NaN.
"""
input_point_nans = np.isnan(x) | np.isnan(y)
if z is not None:
input_point_nans |= np.isnan(z)
handle_nans = np.any(input_point_nans)
if handle_nans:
# Remove NaN points from input data to avoid the error.
x[input_point_nans] = 0.0
y[input_point_nans] = 0.0
if z is not None:
z[input_point_nans] = 0.0
result = super(Robinson, self).transform_points(src_crs, x, y, z)
if handle_nans:
# Result always has shape (N, 3).
# Blank out each (whole) point where we had a NaN in the input.
result[input_point_nans] = np.nan
return result
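# A minimal sketch (hypothetical helper, never called) of the NaN handling
# described above: NaN input points come back as fully-NaN rows instead of
# triggering a fatal error in the underlying Robinson implementation.
def _example_robinson_nan_handling():
    robin = Robinson()
    x = np.array([0.0, np.nan, 90.0])
    y = np.array([0.0, 45.0, np.nan])
    result = robin.transform_points(PlateCarree(), x, y)
    # Rows 1 and 2 are entirely NaN; row 0 holds the projected coordinates.
    return result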
class InterruptedGoodeHomolosine(Projection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'igh'), ('lon_0', central_longitude)]
super(InterruptedGoodeHomolosine, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
points = []
n = 31
geodetic_crs = self.as_geodetic()
# Right boundary
for lat in np.linspace(-90, 90, n):
points.append(self.transform_point(180 + central_longitude,
lat, geodetic_crs))
# Top boundary
interrupted_lons = (-40.0,)
delta = 0.001
for lon in interrupted_lons:
for lat in np.linspace(90, 0, n):
points.append(self.transform_point(lon + delta +
central_longitude,
lat, geodetic_crs))
for lat in np.linspace(0, 90, n):
points.append(self.transform_point(lon - delta +
central_longitude,
lat, geodetic_crs))
# Left boundary
for lat in np.linspace(90, -90, n):
points.append(self.transform_point(-180 + central_longitude,
lat, geodetic_crs))
# Bottom boundary
interrupted_lons = (-100.0, -20.0, 80.0)
delta = 0.001
for lon in interrupted_lons:
for lat in np.linspace(-90, 0, n):
points.append(self.transform_point(lon - delta +
central_longitude,
lat, geodetic_crs))
for lat in np.linspace(0, -90, n):
points.append(self.transform_point(lon + delta +
central_longitude,
lat, geodetic_crs))
# Close loop
points.append(self.transform_point(180 + central_longitude, -90,
geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
x = [p[0] for p in points]
y = [p[1] for p in points]
self._x_limits = min(x), max(x)
self._y_limits = min(y), max(y)
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 2e4
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _Satellite(Projection):
def __init__(self, projection, satellite_height=35785831,
central_longitude=0.0, central_latitude=0.0,
false_easting=0, false_northing=0, globe=None):
proj4_params = [('proj', projection), ('lon_0', central_longitude),
('lat_0', central_latitude), ('h', satellite_height),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(_Satellite, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
h = np.float(satellite_height)
max_x = h * math.atan(a / (a + h))
max_y = h * math.atan(b / (b + h))
coords = _ellipse_boundary(max_x, max_y,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
self._xlim = self._boundary.bounds[::2]
self._ylim = self._boundary.bounds[1::2]
self._threshold = np.diff(self._xlim)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._xlim
@property
def y_limits(self):
return self._ylim
class Geostationary(_Satellite):
"""
Perspective view looking directly down from above a point on the equator.
"""
def __init__(self, central_longitude=0.0, satellite_height=35785831,
false_easting=0, false_northing=0, globe=None):
super(Geostationary, self).__init__(
projection='geos',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=0.0,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
class NearsidePerspective(_Satellite):
"""
Perspective view looking directly down from above a point on the globe.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
satellite_height=35785831,
false_easting=0, false_northing=0, globe=None):
super(NearsidePerspective, self).__init__(
projection='nsper',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=central_latitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
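# A hedged sketch (hypothetical helper, never called) of the two satellite
# views above: Geostationary always sits over the equator, whereas
# NearsidePerspective may be centred on any latitude. The heights are the
# class defaults, in metres; the longitudes/latitudes are illustrative.
def _example_satellite_views():
    geo = Geostationary(central_longitude=0.0)
    nsper = NearsidePerspective(central_longitude=0.0, central_latitude=50.0)
    return geo.boundary, nsper.boundary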
class AlbersEqualArea(Projection):
"""
An Albers Equal Area projection
This projection is conic and equal-area, and is commonly used for maps of
the conterminous United States.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* central_latitude - The central latitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* standard_parallels - The one or two latitudes of correct scale.
Defaults to (20, 50).
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'aea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(AlbersEqualArea, self).__init__(proj4_params, globe=globe)
# bounds
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
tmp = np.linspace(central_longitude - 180, central_longitude + 180, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LineString(points)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class AzimuthalEquidistant(Projection):
"""
An Azimuthal Equidistant projection
This projection provides accurate angles about and distances through the
central position. Other angles, distances, or areas may be distorted.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Kwargs:
* central_longitude - The true longitude of the central meridian in
degrees. Defaults to 0.
* central_latitude - The true latitude of the planar origin in
degrees. Defaults to 0.
* false_easting - X offset from the planar origin in metres.
Defaults to 0.
* false_northing - Y offset from the planar origin in metres.
Defaults to 0.
* globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
default globe is created.
"""
# Warn when using Azimuthal Equidistant with proj4 < 4.9.2 due to
# incorrect transformation past 90 deg distance (see
# https://github.com/OSGeo/proj.4/issues/246).
if PROJ4_VERSION != ():
if PROJ4_VERSION < (4, 9, 2):
warnings.warn('The Azimuthal Equidistant projection in Proj.4 '
'older than 4.9.2 incorrectly transforms points '
'farther than 90 deg from the origin. Use this '
'projection with caution.')
else:
warnings.warn('Cannot determine Proj.4 version. The Azimuthal '
'Equidistant projection may be unreliable and '
'should be used with caution.')
proj4_params = [('proj', 'aeqd'), ('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting), ('y_0', false_northing)]
super(AzimuthalEquidistant, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
coords = _ellipse_boundary(a * np.pi, b * np.pi,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Sinusoidal(Projection):
"""
A Sinusoidal projection.
This projection is equal-area.
"""
def __init__(self, central_longitude=0.0, false_easting=0.0,
false_northing=0.0, globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'sinu'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing)]
super(Sinusoidal, self).__init__(proj4_params, globe=globe)
# Obtain boundary points
points = []
n = 91
geodetic_crs = self.as_geodetic()
for lat in np.linspace(-90, 90, n):
points.append(
self.transform_point(180 + central_longitude,
lat, geodetic_crs)
)
for lat in np.linspace(90, -90, n):
points.append(
self.transform_point(-180 + central_longitude,
lat, geodetic_crs)
)
points.append(
self.transform_point(180 + central_longitude, -90, geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
minx, miny, maxx, maxy = self._boundary.bounds
self._x_limits = minx, maxx
self._y_limits = miny, maxy
self._threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# MODIS data products use a Sinusoidal projection of a spherical Earth
# http://modis-land.gsfc.nasa.gov/GCTP.html
Sinusoidal.MODIS = Sinusoidal(globe=Globe(ellipse=None,
semimajor_axis=6371007.181,
semiminor_axis=6371007.181))
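# A small sketch (hypothetical helper, never called) of the MODIS instance
# defined above: geodetic longitude/latitude maps to sinusoidal metres on the
# spherical MODIS Earth. The point is illustrative.
def _example_modis_sinusoidal():
    return Sinusoidal.MODIS.transform_point(10.0, 50.0, Geodetic())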
class _BoundaryPoint(object):
def __init__(self, distance, kind, data):
"""
A representation for a geometric object which is
connected to the boundary.
Parameters
==========
distance - float
The distance along the boundary that this object
can be found.
kind - bool
Whether this object represents a point from the pre-computed
boundary.
data - point or namedtuple
The actual data that this boundary object represents.
"""
self.distance = distance
self.kind = kind
self.data = data
def __repr__(self):
return '_BoundaryPoint(%r, %r, %s)' % (self.distance, self.kind,
self.data)
def _find_first_gt(a, x):
for v in a:
if v.distance > x:
return v
# We've gone all the way around, so pick the first point again.
return a[0]
def epsg(code):
"""
Return the projection which corresponds to the given EPSG code.
The EPSG code must correspond to a "projected coordinate system",
so EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate
system" will not work.
.. note::
The conversion is performed by querying https://epsg.io/ so a
live internet connection is required.
"""
import cartopy._epsg
return cartopy._epsg._EPSGProjection(code)
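# A minimal sketch (hypothetical helper, never called) of epsg(): the lookup
# requires a live internet connection and a *projected* EPSG code, e.g. 27700,
# the British National Grid.
def _example_epsg_lookup():
    return epsg(27700)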
| lgpl-3.0 |
jamescorsini/evaluator | portfolio_value.py | 1 | 10234 | # portfolio_value.py
'''
Software used:
http://wiki.quantsoftware.org/index.php?title=QSTK_License
Created on October 16, 2014
Updated on October 29, 2014
@author: James Corsini
@summary: Takes buy and sell data in csv format and outputs the portfolio's
value. Should be used in conjunction with portfolio_analyze.py to get
stats and plots.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import csv
import math
from numpy import genfromtxt
import time
import os
import shutil
from portfolio_functions import *
def readcsvtona(csvfile):
'''
@summary: Reads in data in from csv (hist_orders format)
@param csvfile: CSV file in the format
year, month, day, stock, order (buy/sell), shares
@return: Numpy arrays of the dates, stocks, orders, and shares
'''
# Reading orders from the csv file.
na_data = np.loadtxt(csvfile, delimiter=",",usecols=(0,1,2,5))
na_dates = np.int_(na_data[:,0:3])
na_share = na_data[:,3]
na_share = na_share.reshape(-1,1)
na_stock = np.genfromtxt(csvfile, delimiter=",",usecols=(3),dtype=str)
na_ordpos = np.genfromtxt(csvfile, delimiter=",",usecols=(4),dtype=str)
return [na_dates, na_stock, na_ordpos, na_share]
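# A hedged sketch (hypothetical helper, not used by the script) of the orders
# CSV layout that readcsvtona expects: year, month, day, stock (or _CASH),
# order (Buy/Sell), shares or cash. The file name and rows are made up for
# illustration only.
def _example_orders_csv(tmp_path='example_orders.csv'):
    rows = [[2014, 10, 16, '_CASH', 'Buy', 50000],
            [2014, 10, 17, 'AAPL', 'Buy', 100],
            [2014, 10, 28, 'AAPL', 'Sell', 100]]
    # Write a tiny orders file in the expected layout, then parse it back.
    with open(tmp_path, 'wb') as f:
        csv.writer(f).writerows(rows)
    return readcsvtona(tmp_path)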
def natodict(na_dates,na_stock,na_ordpos,na_share):
'''
@summary: Converts numpy arrays into dict while obtaining Yahoo data
@param na_dates: Array of dates when transaction occurred
@param na_stock: Array of stocks bought/ sold and cash deposited/ withdrawn
@param na_ordpos: Array of transaction types (Buy or Sell)
@param na_share: Array of shares or cash transacted
    @return: Dict of Yahoo data (d_data) with an index called ldt_timestamps,
    the list of symbols (not repeated), and ldt_trans, which is an index of
    the transaction dates
'''
# Creating the timestamps from dates read
ldt_trans = []
#print na_stock
for i in range(0, len(na_stock)):
ldt_trans.append(dt.datetime(na_dates[i, 0],
na_dates[i, 1], na_dates[i, 2],16,0,0))
# get stock data
ldt_stocks = list(set(na_stock))
#print ldt_stocks
ls_symbols = ldt_stocks
# Remove _CASH from this array
if '_CASH' in ls_symbols:
ls_symbols.remove('_CASH')
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
# Creating the timestamps from dates read
ldt_timestamps = []
for i in range(0, na_dates.shape[0]):
ldt_timestamps.append(dt.date(na_dates[i, 0],
na_dates[i, 1], na_dates[i, 2]))
# Start and End date of the charts
dt_end = ldt_timestamps[-1]
dt_start = ldt_timestamps[0]
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Keys to be read from the data, it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Filling the data for NAN
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
return [d_data,ldt_timestamps,ls_symbols,ldt_trans]
def closeprices(d_data,ldt_timestamps,ls_symbols):
'''
@summary: Get only the close prices from the dict for all symbols
@param d_data: Dict containing all of the Yahoo data
@param ldt_timestamps: List of timestamps from the begining of the portfolio
period until the end
@param ls_symbols: List of symbols in the dict
@return: The close prices in numpy array and dataframe
'''
# Getting the numpy ndarray of close prices.
na_price = d_data['close'].values
# Converting na_price to dataframe
df_price = pd.DataFrame(na_price, index=ldt_timestamps,columns = ls_symbols)
return [na_price, df_price]
def cashval(df_price, ls_symbols, ldt_timestamps, na_stock, na_share, na_ordpos, ldt_trans):
'''
@summary: Calculates the cash and value of the portfolio for each day
@param df_price: Yahoo prices of all the sympbols
@param ls_symbols: List of symbols in the portfolio
@param ldt_timestamps: List of dates and times from the start of the portfolio to the end
@param na_stock: Array of stocks in order of transaction
@param na_share: Array of shares/ cash in order of transaction
@param na_ordpos: Array of orders (Buy/Sell/Deposit/Withdraw) in order of transaction
@param ldt_trans: List of the dates and times of the transactions
@return: The daily value of the portfolio
'''
# Initialize variables
# @future: Can make these inputs to the function
# Value for commission (if wanted)
fl_commission = 7.95
# Value for starting cash
startcash = 0 #1000000
# Making the cash and value array
# this will have rows equal dates and the columns are ls_valsym
ls_valsym = ["PortCash","Value"]
# Create dataframe for value array with 0s
df_val = pd.DataFrame(index=ldt_timestamps, columns=ls_valsym)
df_val = df_val.fillna(0.0)
    # Setting first value to be the starting cash amount
df_val['PortCash'].ix[ldt_timestamps[0]] = startcash
# Create dataframe for shares filled with 0s
# indexed by the timestamps and columns are the symbols in the portfolio
df_shares = pd.DataFrame(index=ldt_timestamps, columns=ls_symbols)
df_shares = df_shares.fillna(0)
    # Main loop to calculate the shares map (assigns the appropriate number of
    # shares to each stock for each day). Also calculates the cash for each day
    # that the portfolio is active.
for k in range(0,len(na_ordpos)):
rownum = df_shares.index.get_loc(ldt_trans[k])
if na_ordpos[k] == 'Buy':
if na_stock[k]=='_CASH':
#print 'add cash'
df_val['PortCash'].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_val['PortCash'].ix[ldt_timestamps[rownum]]+na_share[k,0]
else:
df_shares[na_stock[k]].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_shares[na_stock[k]].ix[ldt_trans[k]]+na_share[k,0]
df_val['PortCash'].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_val['PortCash'].ix[ldt_timestamps[rownum]]-(na_share[k,0]*df_price[na_stock[k]].ix[ldt_timestamps[rownum]])-fl_commission
#print "BUY; ", na_share[k,0], " ", na_stock[k]], " at ", df_price[na_stock[k]].ix[ldt_timestamps[rownum]]
if na_ordpos[k] == 'Sell':
if na_stock[k]=='_CASH':
#print 'withdraw cash'
df_val['PortCash'].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_val['PortCash'].ix[ldt_timestamps[rownum]]-na_share[k,0]
else:
df_shares[na_stock[k]].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_shares[na_stock[k]].ix[ldt_trans[k]]-na_share[k,0]
df_val['PortCash'].ix[ldt_timestamps[rownum:len(ldt_timestamps)]] = df_val['PortCash'].ix[ldt_timestamps[rownum]]+(na_share[k,0]*df_price[na_stock[k]].ix[ldt_timestamps[rownum]])
#print "SELL; ", na_share[k,0], " ", na_stock[k]], " at ", df_price[na_stock[k]].ix[ldt_timestamps[rownum]]
# Finding the value of the portfolio for each day
for j in range(0,len(ldt_timestamps)):
for sym in ls_symbols:
df_val['Value'].ix[ldt_timestamps[j]] = df_val['Value'].ix[ldt_timestamps[j]]+(df_shares[sym].ix[ldt_timestamps[j]]*df_price[sym].ix[ldt_timestamps[j]])
    # To get the value of the portfolio for a specific day:
    #valdate = dt.datetime(yyyy, mm, dd, 16, 0, 0)
    #print "Value for portfolio at ", valdate, ": ", df_val['Value'].ix[valdate]
# Uses numpy to sum the cash and value columns to obtain portfolio value
na_portval = np.sum(df_val, axis=1)
# Converts portfolio value (value + cash) back to dataframe
df_portval = pd.DataFrame(na_portval,index=ldt_timestamps, columns=['PortValue'])
return df_portval
def printvalcsv(df_portval, ldt_timestamps):
'''
@summary: Creates CSV file and installs it in the proper directory.
@param df_portval: Dataframe of the portfolio values
@param ldt_timestamps: List of date times in timestamps
@return: Nothing
'''
# Creates directory if it doesn't exist
    # Warning: if the backup folder does not exist yet, the first run only
    # creates it and no backup is taken; an existing values.csv is backed up
    # on subsequent runs.
    if not os.path.exists('./output/backup'):
        os.makedirs('./output/backup')
    elif os.path.exists('./output/values.csv'):
        shutil.copy2('./output/values.csv', './output/backup/values' + timestamp() + '.csv')
# Writes CSV file
csv_value = csv.writer(open("./output/values.csv","wb"), delimiter=',',quoting=csv.QUOTE_NONE)
for j in range(0,len(ldt_timestamps)):
csv_value.writerow([ldt_timestamps[j].year,ldt_timestamps[j].month,ldt_timestamps[j].day,df_portval['PortValue'].ix[ldt_timestamps[j]]])
return
if __name__ == '__main__':
start_value = time.time()
[na_dates,na_stock,na_ordpos,na_share] = readcsvtona('./hist_orders/hist_orders.csv')
[d_data,ldt_timestamps,ls_symbols,ldt_trans] = natodict(na_dates,na_stock,na_ordpos,na_share)
[na_price, df_price] = closeprices(d_data,ldt_timestamps,ls_symbols)
df_portval = cashval(df_price, ls_symbols, ldt_timestamps, na_stock, na_share, na_ordpos, ldt_trans)
printvalcsv(df_portval, ldt_timestamps)
print "Portfolio Value: ", df_portval['PortValue'][-1]
print "Portfolio_value run in: " , (time.time() - start_value) , " seconds.";
print "Done"
| mit |
fzalkow/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
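# A minimal sketch of one possible solution (hedged -- the skeleton
# intentionally leaves the tasks above open). It assumes a character 1-3-gram
# TfidfVectorizer feeding the Perceptron imported above.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)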
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
ngoix/OCRF | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
W, H = alt_nnmf(V, r, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
W : 2-ndarray, [n_samples, r]
Component part of the factorization
H : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
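# Hedged usage sketch (not part of the benchmark below): factor a small
# random non-negative matrix with alt_nnmf and return the Frobenius residual.
def _demo_alt_nnmf(n=30, m=20, r=5):
    rng = np.random.RandomState(0)
    V = np.abs(rng.randn(n, m))
    W, H = alt_nnmf(V, r, tol=1e-3)
    return norm(V - np.dot(W, H))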
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause |
vibhorag/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
matz-e/lobster | lobster/commands/plot.py | 1 | 60092 | # vim: fileencoding=utf-8
from collections import defaultdict, Counter
from cycler import cycler
from datetime import datetime
import glob
import gzip
import itertools
import jinja2
import logging
import multiprocessing
import os
import pickle
import shutil
import sqlite3
import signal
import time
import re
import string
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import numpy as np
from scipy.interpolate import UnivariateSpline
from lobster import util
from lobster.core import unit
from lobster.core.command import Command
from WMCore.DataStructs.LumiList import LumiList
matplotlib.rc('axes', labelsize='large')
matplotlib.rc('figure', figsize=(8, 1.5))
matplotlib.rc('figure.subplot', left=0.09, right=0.96, bottom=0.275)
matplotlib.rc('hatch', linewidth=.3)
matplotlib.rc('font', size=7)
matplotlib.rc('font', **{'sans-serif': 'Liberation Sans', 'family': 'sans-serif'})
logger = logging.getLogger('lobster.plotting')
def reset_signals():
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def reduce(a, idx, interval):
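    # Thin out rows of `a`: an interior row is kept only if its column `idx`
    # value (or the next row's) lies more than `interval` beyond the last
    # kept row; the first and last rows are always kept.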
quant = a[:, idx]
last = quant[0]
select = np.ones((len(quant),), dtype=np.bool)
for i in range(1, len(quant) - 1):
if quant[i] - last > interval or quant[i + 1] - last > interval:
select[i] = True
last = quant[i]
else:
select[i] = False
return a[select]
def split_by_column(a, col, key=lambda x: x, threshold=None):
"""Split an array into multiple ones, based on unique values in the named
column `col`.
"""
keys = np.unique(a[col])
vals = [a[a[col] == v] for v in keys]
keys = map(key, keys)
if threshold:
total = float(len(a))
others = filter(lambda v: len(v) / total < threshold, vals)
keys, vals = zip(*filter(lambda (k, v): len(v) /
total >= threshold, zip(keys, vals)))
if len(others) > 0:
keys += ("Other", )
vals += (np.concatenate(others), )
return keys, vals
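# Illustrative example (not executed): given a structured task array,
#   keys, groups = split_by_column(tasks[['id', 'exit_code']], 'exit_code')
# yields the unique exit codes and, for each, the sub-array of matching rows;
# passing `threshold` folds rare values into a single "Other" group.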
def unix2matplotlib(time):
return dates.date2num(datetime.fromtimestamp(time))
def unpack(arg):
source, target = arg
try:
if os.path.isfile(target):
logger.info("skipping {0}".format(source))
return
logger.info("unpacking {0}".format(source))
with open(target, 'w') as output:
input = gzip.open(source, 'rb')
output.writelines(input)
input.close()
except IOError:
logger.error("cannot unpack {0}".format(source))
def mp_call(arg):
fct, args, kwargs = arg
try:
fct(*args, **kwargs)
except Exception as e:
logger.error('method {0} failed with "{1}", using args {2}, {3}'.format(fct, e, args, kwargs))
logger.exception(e)
def mp_pickle(plotdir, name, data):
logger.debug("Saving data for {0}".format(name))
with open(os.path.join(plotdir, name + '.pkl'), 'wb') as f:
pickle.dump(data, f)
def mp_pie(vals, labels, name, plotdir=None, **kwargs):
vals = [max(0, val) for val in vals]
fig, ax = plt.subplots()
fig.set_size_inches(4, 3)
ratio = 0.75
ax.set_position([0.3, 0.3, ratio * 0.7, 0.7])
paper = kwargs.pop('paper', False)
if paper:
plt.rcParams['patch.force_edgecolor'] = True
if 'colors' in kwargs:
del kwargs['colors']
hatching = [p * 5 for p in ['/', '\\', '|', '-', 'x', '+', '*']]
monochrome = \
cycler('hatch', hatching) * \
cycler('edgecolor', ['k']) * \
cycler('color', ['w']) * \
cycler('linewidth', [.5])
ax.set_prop_cycle(monochrome)
newlabels = []
total = sum(vals)
for label, val in zip(labels, vals):
if float(val) / total < .025:
newlabels.append('')
else:
newlabels.append(label)
with open(os.path.join(plotdir, name + '.dat'), 'w') as f:
for l, v in zip(labels, vals):
f.write('{0}\t{1}\n'.format(l, v))
patches, texts = ax.pie([max(0, val) for val in vals], labels=newlabels, **kwargs)
if paper:
for p, h in zip(patches, itertools.cycle(hatching)):
p.set_hatch(h)
p.set_linewidth(.5)
boxes = []
newlabels = []
for patch, text, label in zip(patches, texts, labels):
if isinstance(label, basestring) and len(text.get_text()) == 0 and len(label) > 0:
boxes.append(patch)
newlabels.append(label)
if len(boxes) > 0:
ax.legend(boxes, newlabels, ncol=2, mode='expand',
bbox_to_anchor=(0, 0, 1, .3),
bbox_transform=plt.gcf().transFigure,
title='Small Slices',
prop={'size': 6})
return mp_saveimg(plotdir, name)
def smooth_data(a):
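    # Resample each (x, y) series onto 400 evenly spaced x values with a
    # smoothing UnivariateSpline so line and stack plots look less jagged;
    # empty series are passed through unchanged.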
b = []
for (x, y) in a:
if isinstance(x, list):
x = np.array(x)
if isinstance(y, list):
y = np.array(y)
if len(x) == 0:
b.append(([], []))
continue
s = np.linspace(x.min(), x.max(), 400)
t = UnivariateSpline(x, y, s=.1)
b.append((s, t(s)))
del t
return b
class BetterFormatter(ticker.ScalarFormatter):
def __init__(self):
super(BetterFormatter, self).__init__(useMathText=True)
self.set_powerlimits((-3, 7))
def pprint_val(self, value):
if round(value, 0) == value:
value = int(value)
if self.orderOfMagnitude != 0:
value /= 10.0 ** self.orderOfMagnitude
return "{:,}".format(value)
def mp_plot(a, xlabel, stub=None, ylabel='tasks', bins=50, modes=None, ymax=None, xmin=None, xmax=None, plotdir=None, **kwargs):
if not modes:
modes = [Plotter.PROF | Plotter.TIME, Plotter.HIST]
paper = kwargs.pop('paper', False)
oldxlabel = xlabel
oldylabel = ylabel
for mode in modes:
filename = stub
fig, ax = plt.subplots()
hatching = []
xlabel = oldxlabel
ylabel = oldylabel
ax.yaxis.set_major_formatter(BetterFormatter())
# to pickle plot contents
data = {'data': a, 'bins': bins, 'labels': kwargs.get('label')}
if paper:
hatching = [p * 8 for p in ['/', '\\', '|', '-', 'x', '+', '*']]
if 'color' in kwargs:
del kwargs['color']
if mode & Plotter.HIST:
monochrome = \
cycler('hatch', hatching) * \
cycler('edgecolor', ['k']) * \
cycler('color', ['w']) * \
cycler('linewidth', [.5])
ax.set_prop_cycle(monochrome)
elif mode & Plotter.STACK:
a = smooth_data(a)
monochrome = \
cycler('hatch', hatching) * \
cycler('edgecolor', ['k']) * \
cycler('facecolor', ['w']) * \
cycler('linewidth', [.5])
ax.set_prop_cycle(monochrome)
elif mode & Plotter.PROF:
kwargs['linewidth'] = .8
monochrome = \
cycler('color', ['k', 'gray']) * \
cycler('marker', ['o', 'v', '^', 's', 'P', 'X', 'D'])
ax.set_prop_cycle(monochrome)
else:
a = smooth_data(a)
monochrome = \
cycler('color', ['k', 'gray']) * \
cycler('linewidth', [1.]) * \
cycler('linestyle', ['-', '--', ':', '-.']) * \
cycler('marker', [''])
ax.set_prop_cycle(monochrome)
if mode & Plotter.TIME:
f = np.vectorize(unix2matplotlib)
a = [(f(x), y) for (x, y) in a if len(x) > 0]
data['data'] = a
# interval = 2**math.floor(math.log((bins[-1] - bins[0]) / 9000.0) / math.log(2))
# num_bins = map(unix2matplotlib, bins)
# ax.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(0, 60, 15), interval=24*60))
ax.xaxis.set_major_formatter(dates.DateFormatter("%m-%d\n%H:%M"))
ylabel = xlabel
else:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if mode & Plotter.HIST:
filename += '-hist'
if mode & Plotter.TIME:
borders = (unix2matplotlib(xmin), unix2matplotlib(xmax))
count, bins, patches = ax.hist([x for (x, y) in a], weights=[y for (x, y) in a],
bins=bins, histtype='stepfilled', stacked=True,
range=borders, **kwargs)
if '/' not in ylabel:
ax.set_ylabel('{} / {:.0f} min'.format(ylabel,
(bins[1] - bins[0]) * 24 * 60.))
else:
ax.hist([y for (x, y) in a], bins=bins,
histtype='stepfilled', stacked=True, **kwargs)
elif mode & Plotter.PROF:
filename += '-prof'
data['data'] = []
markers = itertools.cycle(['o', 'v', '^', 's', 'P', 'X', 'D'])
for i, (x, y) in enumerate(a):
borders = (unix2matplotlib(xmin), unix2matplotlib(xmax))
sums, edges = np.histogram(
x, bins=bins, range=borders, weights=y)
squares, edges = np.histogram(
x, bins=bins, range=borders, weights=np.multiply(y, y))
counts, edges = np.histogram(x, bins=bins, range=borders)
avg = np.divide(sums, counts)
avg_sq = np.divide(squares, counts)
err = np.sqrt(np.subtract(avg_sq, np.multiply(avg, avg)))
sel = counts > 0
newargs = dict(kwargs)
if 'color' in newargs:
newargs['color'] = newargs['color'][i]
if 'label' in newargs:
newargs['label'] = newargs['label'][i]
centers = np.array([.5 * (x + y)
for x, y in zip(edges[:-1], edges[1:])])
ax.errorbar(centers[sel], avg[sel], yerr=err[sel],
fmt='o', marker=next(markers), ms=3, capsize=0, **newargs)
data['data'].append((centers, avg, err))
elif mode & Plotter.PLOT:
filename += '-plot'
if 'label' in kwargs:
for (l, (x, y)) in zip(kwargs['label'], a):
ax.plot(x, y, label=l)
else:
for (x, y) in a:
ax.plot(x, y)
elif mode & Plotter.STACK:
filename += '-stack'
t = []
ys = []
for x, y in a:
ys.append(y)
t = x
if not paper:
kwargs['edgecolor'] = 'none'
ps = ax.stackplot(t, *ys, **kwargs)
if paper:
for p, h in zip(ps, itertools.cycle(hatching)):
p.set_edgecolor('k')
p.set_facecolor('w')
p.set_hatch(h)
p.set_linewidth(.5)
ax.grid(True)
if mode & Plotter.TIME:
ax.axis(xmin=unix2matplotlib(xmin),
xmax=unix2matplotlib(xmax), ymin=0)
else:
ax.axis(ymin=0)
if ymax:
ax.axis(ymax=ymax)
if not mode & Plotter.TIME and mode & Plotter.HIST and not paper:
labels = kwargs.get('label', [''] * len(a))
stats = {}
for label, (x, y) in zip(labels, a):
avg = np.average(y)
var = np.std(y)
med = np.median(y)
stats[label] = (avg, var, med)
info = u"{0}μ = {1:.3g}, σ = {2:.3g}, median = {3:.3g}"
t = ax.text(0.75, 0.7,
'\n'.join([info.format(label + ': ' if len(stats) > 1 else '', avg, var, med)
for label, (avg, var, med) in stats.items()]),
ha="center", va="center", transform=ax.transAxes, backgroundcolor='w')
t.set_bbox({'color': 'w', 'alpha': .5, 'edgecolor': 'w'})
if 'label' in kwargs or 'labels' in kwargs:
ax.legend(bbox_to_anchor=(.5, .95),
frameon=False,
loc='lower center',
ncol=len(kwargs.get('label', kwargs.get('labels'))),
numpoints=1,
prop={'size': 7})
mp_pickle(plotdir, filename, data)
mp_saveimg(plotdir, filename)
def mp_saveimg(plotdir, name):
logger.info("Saving {0}".format(name))
plt.savefig(os.path.join(plotdir, name + '.png'))
plt.savefig(os.path.join(plotdir, name + '.pdf'))
plt.close()
class Plotter(object):
TIME = 1
HIST = 2
PLOT = 4
PROF = 8
STACK = 16
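    # Bit flags, combined with `|` and passed to plot() via `modes`:
    # TIME puts timestamps on the x axis, HIST draws (stacked) histograms,
    # PLOT draws smoothed line plots, PROF draws profile points with error
    # bars, and STACK draws stacked area plots.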
def __init__(self, config, outdir=None, paper=False):
self.config = config
self.__paper = paper
self.__store = unit.UnitStore(config)
util.verify(self.config.workdir)
if outdir:
self.__plotdir = outdir
else:
self.__plotdir = config.plotdir if config.plotdir else self.config.label
self.__plotdir = os.path.expandvars(os.path.expanduser(self.__plotdir))
if not os.path.isdir(self.__plotdir):
os.makedirs(self.__plotdir)
def parsetime(self, time):
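        # Accepts 'HH:MM' (interpreted as today), 'YYYY-MM-DD_HH:MM', or
        # 'YYYY-MM-DD' and returns the corresponding unix timestamp;
        # empty input returns None.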
if not time:
return None
try:
t = datetime.combine(
datetime.today().date(),
datetime.strptime(time, '%H:%M').timetz()
)
return int(t.strftime('%s'))
except ValueError:
pass
try:
t = datetime.strptime(time, '%Y-%m-%d_%H:%M')
return int(t.strftime('%s'))
except ValueError:
pass
t = datetime.strptime(time, '%Y-%m-%d')
return int(t.strftime('%s'))
def readdb(self):
logger.debug('reading database')
db = sqlite3.connect(os.path.join(
self.config.workdir, 'lobster.db'), timeout=90)
self.wflow_ids = {}
self.wflow_labels = {}
wflow_cores = {}
for id_, label in db.execute("select id, label from workflows"):
self.wflow_ids[label] = id_
self.wflow_labels[id_] = label
wflow_cores[id_] = getattr(
self.config.workflows, label).category.cores
cur = db.execute(
"select * from tasks where time_retrieved>=? and time_retrieved<=?",
(self.__xmin, self.__xmax))
fields = [xs[0] for xs in cur.description]
textfields = ['host', 'published_file_block']
formats = ['i4' if f not in textfields else 'a100' for f in fields]
tasks = np.array(cur.fetchall(), dtype={
'names': fields, 'formats': formats})
# cores = [wflow_cores[n] for n in tasks['workflow']]
# tasks = rfn.append_fields(tasks, 'cores', data=cores, usemask=False)
failed_tasks = tasks[tasks['status'] == 3] if len(
tasks) > 0 else np.array([], tasks.dtype)
success_tasks = tasks[np.in1d(tasks['status'], (2, 6, 7, 8))] if len(
tasks) > 0 else np.array([], tasks.dtype)
summary_data = list(self.__store.workflow_status())[1:]
# for cases where units per task changes during run, get per-unit info
total_units = 0
start_units = 0
completed_units = []
units_processed = {}
transfers = {}
for (label,) in db.execute("select label from workflows"):
total_units += db.execute(
"select count(*) from units_{0}".format(label)).fetchone()[0]
start_units += db.execute("""
select count(*)
from units_{0}, tasks
where units_{0}.task == tasks.id
and (units_{0}.status=2 or units_{0}.status=6)
and time_retrieved<=?""".format(label), (self.__xmin,)).fetchone()[0]
completed_units.append(np.array(db.execute("""
select units_{0}.id, tasks.time_retrieved
from units_{0}, tasks
where units_{0}.task == tasks.id
and (units_{0}.status=2 or units_{0}.status=6)
and time_retrieved>=? and time_retrieved<=?""".format(label),
(self.__xmin, self.__xmax)).fetchall(),
dtype=[('id', 'i4'), ('time_retrieved', 'i4')]))
units_processed[label] = db.execute("""
select units_{0}.run,
units_{0}.lumi
from units_{0}, tasks
where units_{0}.task == tasks.id
and (units_{0}.status in (2, 6))""".format(label)).fetchall()
transfers[label] = json.loads(db.execute("""
select transfers
from workflows
where label=?""", (label,)).fetchone()[0])
logger.debug('finished reading database')
return success_tasks, failed_tasks, summary_data, np.concatenate(completed_units), total_units, total_units - start_units, units_processed, transfers
def readlog(self, filename=None, category='all'):
if filename:
fn = filename
else:
fn = os.path.join(self.config.workdir,
'lobster_stats_{}.log'.format(category))
with open(fn) as f:
headers = dict(map(lambda (a, b): (b, a),
enumerate(f.readline()[1:].split())))
stats = np.loadtxt(fn)
# fix units of time
stats[:, 0] /= 1e6
for label in ['joined', 'removed', 'lost', 'idled_out', 'fast_aborted', 'blacklisted', 'released']:
field = 'workers_{}'.format(label)
stats[:, headers[field]] = np.maximum(stats[:, headers[field]] - np.roll(stats[:, headers[field]], 1, 0), 0)
if not filename and category == 'all':
self.__total_xmin = stats[0, 0]
self.__total_xmax = stats[-1, 0]
if not self.__xmin:
self.__xmin = stats[0, 0]
if not self.__xmax:
self.__xmax = stats[-1, 0]
return headers, stats[np.logical_and(stats[:, 0] >= self.__xmin, stats[:, 0] <= self.__xmax)]
def savejsons(self, processed):
jsondir = os.path.join(self.__plotdir, 'jsons')
if not os.path.exists(jsondir):
os.makedirs(jsondir)
res = {}
for label in processed:
if not os.path.exists(os.path.join(self.__plotdir, jsondir)):
os.makedirs(os.path.join(self.__plotdir, jsondir))
units = LumiList(lumis=processed[label])
units.writeJSON(os.path.join(
jsondir, 'processed_{}.json'.format(label)))
res[label] = [
(os.path.join('jsons', 'processed_{}.json'.format(label)), 'processed')]
published = os.path.join(
self.config.workdir, label, 'published.json')
if os.path.isfile(published):
shutil.copy(published, os.path.join(
jsondir, 'published_{}.json'.format(label)))
res[label] += [(os.path.join('jsons',
'published_{}.json'.format(label)), 'published')]
return res
def savelogs(self, failed_tasks, samples=10):
logdir = os.path.join(self.__plotdir, 'logs')
work = []
codes = {}
for exit_code, tasks in zip(*split_by_column(failed_tasks[['id', 'exit_code']], 'exit_code')):
if exit_code == 0:
continue
codes[exit_code] = [len(tasks), []]
logger.info(
"Copying sample logs for exit code {0}".format(exit_code))
for id, e in list(tasks[-samples:]):
try:
source = glob.glob(os.path.join(
self.config.workdir, '*', 'failed', util.id2dir(id)))[0]
except IndexError:
continue
s = os.path.join(source, 'task.log.gz')
t = os.path.join(logdir, str(id) + '.log')
if os.path.exists(s):
codes[exit_code][1].append(str(id))
work.append([s, t])
for label, _, _, _, _, _, _, _, _, failed, skipped, _, _, _ in list(self.__store.workflow_status())[1:-1]:
if failed + skipped == 0:
continue
failed = self.__store.failed_units(label)
skipped = self.__store.skipped_files(label)
for id in failed:
source = os.path.join(
self.config.workdir, label, 'failed', util.id2dir(id))
target = os.path.join(logdir, 'failed_' + label)
if not os.path.exists(target):
os.makedirs(target)
for l in ['task.log.gz']:
s = os.path.join(source, l)
t = os.path.join(target, str(id) + "_" + l[:-3])
if os.path.exists(s):
work.append([s, t])
if len(skipped) > 0:
outname = os.path.join(logdir, 'skipped_{}.txt'.format(label))
if not os.path.isdir(os.path.dirname(outname)):
os.makedirs(os.path.dirname(outname))
with open(outname, 'w') as f:
f.write('\n'.join(skipped))
pool = multiprocessing.Pool(10, reset_signals)
pool.map(unpack, work)
pool.close()
pool.join()
for code in codes:
for id in range(samples - len(codes[code][1])):
codes[code][1].insert(0, "")
return codes
def updatecpu(self, tasks, edges):
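        # Spread each task's CPU usage over the time bins spanned by its
        # processing interval: the task's CPU/wall ratio is applied to the
        # partial first and last bins and to each full interior bin (taken
        # to be 60 s wide here), yielding per-bin CPU seconds.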
cputime = np.zeros(len(edges) - 1)
ratio = tasks['time_cpu'] * 1. / \
(tasks['time_processing_end'] - tasks['time_prologue_end'])
starts = np.digitize(tasks['time_prologue_end'], edges)
ends = np.digitize(tasks['time_processing_end'], edges)
lefts = np.take(edges, starts) - tasks['time_prologue_end']
rights = tasks['time_processing_end'] - np.take(edges, ends - 1)
for fraction, left, start, end, right in zip(ratio, lefts, starts, ends, rights):
if start == end and start > 0 and start <= len(cputime):
# do some special logic if the task is completely in one
# bin: length = left - (bin width - right)
cputime[start - 1] += fraction * \
(left + right - (edges[start] - edges[start - 1]))
else:
if start > 0 and start <= len(cputime):
cputime[start - 1] += fraction * left
cputime[start:end - 1] += fraction * 60
if end >= 0 and end < len(cputime):
cputime[end] += fraction * right
return cputime
def plot(self, a, xlabel, stub=None, ylabel='tasks', bins=50, modes=None, **kwargs_raw):
args = [a, xlabel]
kwargs = {
'stub': stub,
'ylabel': ylabel,
'bins': bins,
'modes': modes,
'xmin': self.__xmin,
'xmax': self.__xmax,
'plotdir': self.__plotdir,
'paper': self.__paper
}
kwargs.update(kwargs_raw)
self.__plotargs.append((mp_plot, args, kwargs))
def pie(self, vals, labels, name, **kwargs_raw):
kwargs = {'plotdir': self.__plotdir, 'paper': self.__paper}
kwargs.update(kwargs_raw)
self.__plotargs.append((mp_pie, [vals, labels, name], kwargs))
def make_foreman_plots(self):
tasks = []
idleness = []
efficiencies = []
names = []
for filename in self.__foremen:
headers, stats = self.readlog(filename)
foreman = os.path.basename(filename)
if re.match('.*log+', foreman):
foreman = foreman[:foreman.rfind('.')]
foreman = string.strip(foreman)
names.append(foreman)
tasks.append((stats[:, headers['timestamp']],
stats[:, headers['tasks_running']]))
idleness.append((stats[:, headers['timestamp']], stats[
:, headers['idle_percentage']]))
efficiencies.append(
(stats[:, headers['timestamp']], stats[:, headers['efficiency']]))
self.plot(
[
(stats[:, headers['timestamp']],
stats[:, headers['workers_busy']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_idle']]),
(stats[:, headers['timestamp']], stats[
:, headers['workers_connected']])
],
'Workers', foreman + '-workers',
modes=[Plotter.PLOT | Plotter.TIME],
label=['busy', 'idle', 'connected']
)
self.plot(
[
(stats[:, headers['timestamp']],
stats[:, headers['workers_joined']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_removed']])
],
'Workers', foreman + '-turnover',
modes=[Plotter.HIST | Plotter.TIME],
label=['joined', 'removed']
)
self.pie(
[
np.sum(stats[:, headers['time_workers_execute_good']]),
np.sum(stats[:, headers['time_workers_execute']]) -
np.sum(stats[:, headers['time_workers_execute_good']])
],
["good execute time", "total-good execute time"],
foreman + "-time-pie",
colors=["green", "red"]
)
if len(names) == 0:
return names
self.plot(
tasks,
'Tasks', 'foreman-tasks',
modes=[Plotter.PLOT | Plotter.TIME],
label=names
)
self.plot(
idleness,
'Idle', 'foreman-idle',
modes=[Plotter.PLOT | Plotter.TIME],
label=names
)
self.plot(
efficiencies,
'Efficiency', 'foreman-efficiency',
modes=[Plotter.PLOT | Plotter.TIME],
label=names
)
return names
def find_failure_hosts(self, failed_tasks, samples=10):
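        # Build a small table of the `samples` hosts with the most failed
        # tasks: the header row lists the most common exit codes, and each
        # following row gives a host, its total failures, and the per-code
        # breakdown.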
if len(failed_tasks) == 0:
return []
hosts, counts = np.unique(failed_tasks['host'], return_counts=True)
ind = np.argpartition(counts, -1)[-samples:]
ind = ind[np.argsort(counts[ind])]
hosts = hosts[ind]
counts = counts[ind]
failures, errors = np.unique(failed_tasks[np.in1d(failed_tasks['host'], hosts)][
'exit_code'], return_counts=True)
ind = np.argpartition(errors, -1)[-samples:]
ind = ind[np.argsort(errors[ind])]
failures = failures[ind][::-1]
table = [["All"] + list(failures)]
for h, c in reversed(zip(hosts, counts)):
host_tasks = failed_tasks[failed_tasks['host'] == h]
table.append(
[h, c] + [len(host_tasks[host_tasks['exit_code'] == f]) for f in failures])
return table
def merge_transfers(self, transfers, labels):
res = defaultdict(lambda: Counter({
'stage-in success': 0,
'stageout success': 0,
'stage-in failure': 0,
'stageout failure': 0
}))
for label in labels:
for protocol in transfers[label]:
res[protocol].update(Counter(transfers[label][protocol]))
return res
def make_time_fraction_plot(self, category):
headers, stats = self.__category_stats[category]
wq_labels = [
'time_send', 'time_receive', 'time_status_msgs',
'time_internal', 'time_polling', 'time_application'
]
lobster_labels = ['status', 'create', 'action', 'update', 'fetch', 'return']
return_labels = ['dash', 'handler', 'updates', 'elk', 'transfers', 'cleanup', 'propagate', 'sqlite']
times = stats[:, headers['timestamp']]
centers = ((times + np.roll(times, 1, 0)) * 0.5)[1:]
centers[0] = times[0]
centers[-1] = times[-1]
def diff(label):
label = 'total_{}_time'.format(label) if 'time' not in label else label
quant = stats[:, headers[label]]
return (quant - np.roll(quant, 1, 0))[1:]
wq_stats = dict((label, diff(label)) for label in wq_labels)
lobster_stats = dict((label, diff(label)) for label in lobster_labels)
return_stats = dict((label, diff('source_' + label)) for label in return_labels)
time_diff = ((times - np.roll(times, 1, 0)) * 1e6)[1:]
everything = np.sum(lobster_stats.values(), axis=0)
other = time_diff - everything
self.plot(
[
(centers, np.divide(lobster_stats[label], everything)) for label in lobster_labels
] + [
(centers, np.divide(other, everything))
],
'Lobster fraction', os.path.join(category, 'lobster-fraction'),
modes=[Plotter.STACK | Plotter.TIME],
labels=lobster_labels + ['other'],
ymax=1.
)
# This is the odd one out, since WQ only provides us with an idle
# fraction
idle_total = np.multiply(
stats[:, headers['timestamp']] - stats[0, headers['timestamp']],
stats[:, headers['idle_percentage']]
)
wq_stats['idle'] = (idle_total - np.roll(idle_total, 1, 0))[1:]
everything = np.sum(wq_stats.values(), axis=0)
other = lobster_stats['fetch'] - everything
self.plot(
[
(centers, np.divide(wq_stats[label], everything)) for label in wq_labels
] + [
(centers, np.divide(other, everything))
],
'WQ fraction', os.path.join(category, 'wq-fraction'),
modes=[Plotter.STACK | Plotter.TIME],
labels=[x.replace('time_', '').replace('_', ' ') for x in wq_labels] + ['other'],
ymax=1.
)
everything = np.sum(return_stats.values(), axis=0)
other = lobster_stats['return'] - everything
self.plot(
[
(centers, np.divide(return_stats[label], everything)) for label in return_labels
] + [
(centers, np.divide(other, everything))
],
'Return fraction', os.path.join(category, 'return-fraction'),
modes=[Plotter.STACK | Plotter.TIME],
labels=[x.replace('_', ' ') for x in return_labels] + ['other'],
ymax=1.
)
def make_master_plots(self, category, good_tasks, success_tasks):
headers, stats = self.__category_stats[category]
edges = np.histogram(stats[:, headers['timestamp']], bins=50)[1]
self.make_time_fraction_plot(category)
self.plot(
[
(stats[:, headers['timestamp']],
stats[:, headers['workers_busy']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_idle']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_init']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_connected']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_able']])
],
'Workers', os.path.join(category, 'workers'),
modes=[Plotter.PLOT | Plotter.TIME],
label=['busy', 'idle', 'init', 'connected', 'able']
)
for resource, unit_ in (('cores', ''), ('memory', '/ MB'), ('disk', '/ MB')):
scale = 1
if unit_ == '/ MB' \
and max(stats[:, headers['total_' + resource]]) > 1000 \
and max(stats[:, headers['committed_' + resource]]) > 1000:
scale = 1000
unit_ = '/ GB'
self.plot(
[
(stats[:, headers['timestamp']], stats[
:, headers['total_' + resource]] / scale),
(stats[:, headers['timestamp']], stats[
:, headers['committed_' + resource]] / scale)
],
'{} {}'.format(resource[0].upper() +
resource[1:], unit_).strip(),
os.path.join(category, resource),
modes=[Plotter.PLOT | Plotter.TIME],
label=['total', 'committed']
)
self.plot(
[(stats[:, headers['timestamp']], stats[:, headers['tasks_running']])],
'Tasks', os.path.join(category, 'tasks'),
modes=[Plotter.PLOT | Plotter.TIME],
label=['running']
)
self.plot(
[
(stats[:, headers['timestamp']],
stats[:, headers['workers_joined']]),
(stats[:, headers['timestamp']],
stats[:, headers['workers_removed']])
],
'Workers', os.path.join(category, 'turnover'),
modes=[Plotter.HIST | Plotter.TIME],
label=['joined', 'removed']
)
self.plot(
[
(stats[:, headers['timestamp']],
stats[:, headers['workers_lost']]),
(stats[:, headers['timestamp']], stats[
:, headers['workers_idled_out']]),
(stats[:, headers['timestamp']], stats[
:, headers['workers_fast_aborted']]),
(stats[:, headers['timestamp']], stats[
:, headers['workers_blacklisted']]),
(stats[:, headers['timestamp']], stats[
:, headers['workers_released']]),
],
'Workers', os.path.join(category, 'worker-deaths'),
modes=[Plotter.HIST | Plotter.TIME],
label=['evicted', 'idled out',
'fast aborted', 'blacklisted', 'released']
)
if len(good_tasks) > 0:
def integrate_wall(q):
def integrate((x, y)):
indices = np.logical_and(stats[:, 0] >= x, stats[:, 0] < y)
values = stats[indices, headers[q]]
if len(values) > 0:
return np.sum(values) * (y - x) / len(values)
return 0
return integrate
walltime = np.array(map(integrate_wall('committed_cores'), zip(edges[:-1], edges[1:])))
cputime = self.updatecpu(success_tasks, edges)
centers = [(x + y) / 2 for x, y in zip(edges[:-1], edges[1:])]
cputime[walltime == 0] = 0.
walltime[walltime == 0] = 1e-6
ratio = np.nan_to_num(np.divide(cputime * 1.0, walltime))
self.plot(
[(centers, ratio)],
'CPU / Wall', os.path.join(category, 'cpu-wall'),
bins=50,
modes=[Plotter.HIST | Plotter.TIME]
)
ratio = np.nan_to_num(np.divide(np.cumsum(cputime) * 1.0, np.cumsum(walltime)))
self.plot(
[(centers, ratio)],
'Integrated CPU / Wall', os.path.join(
category, 'cpu-wall-int'),
bins=50,
modes=[Plotter.HIST | Plotter.TIME]
)
walltime = np.array(map(integrate_wall('total_cores'), zip(edges[:-1], edges[1:])))
walltime[walltime == 0] = 1e-6
ratio = np.nan_to_num(np.divide(cputime * 1.0, walltime))
self.plot(
[(centers, ratio)],
'CPU / Coretime', os.path.join(category, 'cpu-cores'),
bins=50,
modes=[Plotter.HIST | Plotter.TIME]
)
ratio = np.nan_to_num(np.divide(np.cumsum(cputime) * 1.0, np.cumsum(walltime)))
self.plot(
[(centers, ratio)],
'Integrated CPU / Coretime', os.path.join(category, 'cpu-cores-int'),
bins=50,
modes=[Plotter.HIST | Plotter.TIME]
)
return edges
def make_workflow_plots(self, subdir, edges, good_tasks, failed_tasks, success_tasks, merge_tasks, xmin=None, xmax=None):
if len(good_tasks) > 0 or len(failed_tasks) > 0:
headers, stats = self.__category_stats[subdir]
dtime = stats[:, headers['timestamp']] - np.roll(stats[:, headers['timestamp']], 1, 0)
dtime[dtime < 0] = 0.
mcommitted = (stats[:, headers['committed_cores']] + np.roll(stats[:, headers['committed_cores']], 1, 0)) * .5
mtotal = (stats[:, headers['total_cores']] + np.roll(stats[:, headers['total_cores']], 1, 0)) * .5
self.pie(
[
np.dot(dtime, mtotal - mcommitted),
np.dot(good_tasks['cores'], good_tasks['time_total_on_worker'] - good_tasks['time_on_worker']) +
np.dot(failed_tasks['cores'], failed_tasks['time_total_on_worker'] - failed_tasks['time_on_worker']),
(
np.dot(good_tasks['cores'], good_tasks['time_total_exhausted_execution']) +
np.dot(failed_tasks['cores'], failed_tasks['time_total_exhausted_execution'])
),
np.dot(failed_tasks['cores'], failed_tasks['time_total_on_worker']),
np.dot(good_tasks['cores'], good_tasks['time_prologue_end'] - good_tasks['time_transfer_in_start']),
np.dot(good_tasks['cores'], good_tasks['time_processing_end'] - good_tasks['time_prologue_end']),
np.dot(good_tasks['cores'], good_tasks['time_transfer_out_end'] - good_tasks['time_processing_end'])
],
["Cores-idle", "Eviction", "Exhausted", "Failed", "Overhead", "Processing", "Stage-out"],
os.path.join(subdir, "time-pie"),
colors=["maroon", "crimson", "coral", "red", "dodgerblue", "green", "skyblue"]
)
workflows = []
colors = []
labels = []
for tasks, label, success_color, merged_color, merging_color in [
(success_tasks, 'processing',
'green', 'lightgreen', 'darkgreen'),
(merge_tasks, 'merging', 'purple', 'fuchsia', 'darkorchid')]:
code_map = {
2: (label + ' (status: successful)', success_color),
6: ('published', 'blue'),
7: (label + ' (status: merging)', merging_color),
8: (label + ' (status: merged)', merged_color)
}
codes, split_tasks = split_by_column(tasks, 'status')
workflows += [(x['time_retrieved'], [1] *
len(x['time_retrieved'])) for x in split_tasks]
colors += [code_map[code][1] for code in codes]
labels += [code_map[code][0] for code in codes]
if len(failed_tasks) > 0:
workflows += [(x['time_retrieved'], [1] *
len(x['time_retrieved'])) for x in [failed_tasks]]
colors += ['red']
labels += ['failed']
self.plot(
workflows,
'tasks', os.path.join(subdir, 'all-tasks'),
modes=[Plotter.HIST | Plotter.TIME],
label=labels,
color=colors
)
if len(good_tasks) > 0:
output, bins = np.histogram(
success_tasks['time_retrieved'], 100,
weights=success_tasks['bytes_output'] / 1024.0 ** 3
)
total_output = np.cumsum(output)
centers = [(x + y) / 2 for x, y in zip(bins[:-1], bins[1:])]
scale = 3600.0 / ((bins[1] - bins[0]) * 1024.0 ** 3)
self.plot(
[(success_tasks['time_retrieved'],
success_tasks['bytes_output'] * scale)],
'Output / (GB/h)', os.path.join(subdir, 'output'),
bins=50,
modes=[Plotter.HIST | Plotter.TIME]
)
self.plot(
[(centers, total_output)],
'Output / GB', os.path.join(subdir, 'output-total'),
bins=50,
modes=[Plotter.PLOT | Plotter.TIME]
)
for prefix, tasks in [('good-', success_tasks), ('merge-', merge_tasks)]:
if len(tasks) == 0:
continue
cache_map = {0: ('cold cache', 'lightskyblue'), 1: (
'hot cache', 'navy'), 2: ('dedicated', 'darkorchid')}
cache, split_tasks = split_by_column(tasks, 'cache')
# plot timeline
things_we_are_looking_at = [
# each entry: (list of (x-times, y-times) pairs, one per cache type;
#              y-label; filestub; color; include-in-pie flag)
([(x['time_wrapper_start'], x['time_total_until_worker_failure'])
for x in split_tasks], 'Eviction', 'eviction', "crimson", False), # red
([(x['time_wrapper_start'], x['time_total_exhausted_execution'])
for x in split_tasks], 'Exhausted resources', 'exhaustion', "coral", False), # orange
([(x['time_wrapper_start'], x['time_processing_end'] - x['time_wrapper_start'])
for x in split_tasks], 'Runtime', 'runtime', "green", False), # red
([(x['time_wrapper_start'], x['time_transfer_in_end'] - x['time_transfer_in_start'])
for x in split_tasks], 'Input transfer', 'transfer-in', "black", True), # gray
([(x['time_wrapper_start'], x['time_wrapper_start'] - x['time_transfer_in_end'])
for x in split_tasks], 'Startup', 'startup', "darkorchid", True), # blue
([(x['time_wrapper_start'], x['time_wrapper_ready'] - x['time_wrapper_start'])
for x in split_tasks], 'Release setup', 'setup-release', "navy", True), # blue
([(x['time_wrapper_start'], x['time_stage_in_end'] - x['time_wrapper_ready'])
for x in split_tasks], 'Stage-in', 'stage-in', "gray", True), # gray
([(x['time_wrapper_start'], x['time_prologue_end'] - x['time_stage_in_end'])
for x in split_tasks], 'Prologue', 'prologue', "orange", True), # yellow
([(x['time_wrapper_start'], x['time_wrapper_ready'] - x['time_wrapper_start'])
for x in split_tasks], 'Overhead', 'overhead', "blue", False), # blue]
([(x['time_wrapper_start'], x['time_processing_end'] - x['time_prologue_end'])
for x in split_tasks], 'Executable', 'processing', "forestgreen", True), # green
([(x['time_wrapper_start'], x['time_epilogue_end'] - x['time_processing_end'])
for x in split_tasks], 'Epilogue', 'epilogue', "khaki", True), # yellow
([(x['time_wrapper_start'], x['time_stage_out_end'] - x['time_epilogue_end'])
for x in split_tasks], 'Stage-out', 'stage-out', "silver", True), # gray
([(x['time_wrapper_start'], x['time_transfer_out_start'] - x['time_stage_out_end'])
for x in split_tasks], 'Output transfer wait', 'transfer-out-wait', "lightskyblue", True), # blue
([(x['time_wrapper_start'], x['time_transfer_out_end'] - x['time_transfer_out_start'])
for x in split_tasks], 'Output transfer work_queue', 'transfer-out-wq', "gainsboro", True) # gray
]
times_by_cache = [plot[0]
for plot in things_we_are_looking_at if plot[-1]]
self.pie(
[np.sum([np.sum(x[1]) for x in times])
for times in times_by_cache],
[plot[1] for plot in things_we_are_looking_at if plot[-1]],
os.path.join(subdir, prefix + "time-detail-pie"),
colors=[plot[-2]
for plot in things_we_are_looking_at if plot[-1]]
)
for a, label, filestub, color, pie in things_we_are_looking_at:
self.plot(
[(xtimes, ytimes / 60.) for xtimes, ytimes in a],
label +
' / m', os.path.join(subdir, prefix + filestub),
color=[cache_map[x][1] for x in cache],
label=[cache_map[x][0] for x in cache]
)
self.plot(
[(tasks['time_retrieved'], tasks['cores'])],
'cores', os.path.join(subdir, prefix + 'cores'),
)
self.plot(
[
(tasks['time_retrieved'], tasks['memory_resident']),
(tasks['time_retrieved'], tasks['memory_virtual']),
(tasks['time_retrieved'], tasks['memory_swap'])
],
'memory / MB', os.path.join(subdir, prefix + 'memory'),
label=['resident', 'virtual', 'swap']
)
self.plot(
[(tasks['time_retrieved'], tasks['workdir_footprint'])],
'working directory footprint / MB', os.path.join(
subdir, prefix + 'workdir-footprint'),
)
bandwidth = tasks['network_bytes_received'] * 8 / \
1e6 / tasks['time_on_worker']
self.plot(
[(tasks['time_retrieved'], bandwidth)],
'bandwidth / Mb/s', os.path.join(subdir,
prefix + 'network-bandwidth'),
modes=[Plotter.PROF | Plotter.TIME]
)
efficiency = tasks['time_cpu'] / (1. * tasks['cores'] * (
tasks['time_processing_end'] - tasks['time_prologue_end']))
self.plot(
[(tasks['time_retrieved'], efficiency)],
'Executable CPU/Wall time', os.path.join(
subdir, prefix + 'exe-efficiency'),
modes=[Plotter.HIST]
)
for resource, unit_ in (('cores', ''), ('disk', '/ MB'), ('memory', '/ MB')):
self.plot(
[(tasks['time_transfer_in_start'],
tasks['allocated_' + resource])],
'allocated {} {}'.format(resource, unit_).strip(),
os.path.join(subdir, prefix + 'allocated-' + resource),
modes=[Plotter.PROF | Plotter.TIME]
)
wflows, wtasks = split_by_column(tasks, 'workflow')
self.plot(
[(t['time_submit'], t['units']) for t in wtasks],
'task size / units', os.path.join(subdir,
prefix + 'tasksize'),
modes=[Plotter.PROF | Plotter.TIME],
label=[self.wflow_labels[w] for w in wflows]
)
self.plot(
[(tasks['time_retrieved'], tasks['exhausted_attempts'])],
'exhausted attempts', os.path.join(
subdir, prefix + 'exhausted-attempts'),
)
if len(failed_tasks) > 0:
logs = self.savelogs(failed_tasks)
fail_labels, fail_values = split_by_column(
failed_tasks, 'exit_code', threshold=0.025)
self.pie(
[len(xs['time_retrieved']) for xs in fail_values],
fail_labels,
os.path.join(subdir, "failed-pie")
)
self.plot(
[(xs['time_retrieved'], [1] * len(xs['time_retrieved']))
for xs in fail_values],
'Failed tasks', os.path.join(subdir, 'failed-tasks'),
modes=[Plotter.HIST | Plotter.TIME],
label=map(str, fail_labels)
)
self.plot(
[(tasks['time_retrieved'], tasks['cores'])],
'cores', os.path.join(subdir, 'failed-cores'),
)
self.plot(
[
(failed_tasks['time_retrieved'],
failed_tasks['memory_resident']),
(failed_tasks['time_retrieved'],
failed_tasks['memory_virtual']),
(failed_tasks['time_retrieved'],
failed_tasks['memory_swap'])
],
'memory / MB', os.path.join(subdir, 'failed-memory'),
label=['resident', 'virtual', 'swap']
)
self.plot(
[(failed_tasks['time_retrieved'], failed_tasks['workdir_footprint'])],
'working directory footprint / MB', os.path.join(
subdir, 'failed-workdir-footprint'),
)
self.plot(
[(failed_tasks['time_retrieved'], failed_tasks['exhausted_attempts'])],
'exhausted attempts', os.path.join(
subdir, 'failed-exhausted-attempts'),
)
else:
logs = None
return logs
def make_plots(self, xmin=None, xmax=None, foremen=None):
self.__plotargs = []
self.__xmin = self.parsetime(xmin)
self.__xmax = self.parsetime(xmax)
self.__foremen = foremen if foremen else []
# readlog() determines the time bounds of sql queries if not
# specified explicitly.
self.__category_stats = {'all': self.readlog()}
for category in self.config.categories:
label = category.name
if label == 'merge':
continue
self.__category_stats[label] = self.readlog(category=label)
good_tasks, failed_tasks, summary_data, completed_units, total_units, start_units, units_processed, transfers = self.readdb()
success_tasks = good_tasks[good_tasks['type'] == 0] if len(
good_tasks) > 0 else np.array([], good_tasks.dtype)
merge_tasks = good_tasks[good_tasks['type'] == 1] if len(
good_tasks) > 0 else np.array([], good_tasks.dtype)
# -------------
# General plots
# -------------
foremen_names = self.make_foreman_plots()
if len(good_tasks) > 0:
completed, bins = np.histogram(
completed_units['time_retrieved'], 100)
total_completed = np.cumsum(completed)
centers = [(x + y) / 2 for x, y in zip(bins[:-1], bins[1:])]
self.plot(
[(centers, total_completed * (-1.) + start_units)],
'units remaining', 'units-total',
bins=50,
modes=[Plotter.PLOT | Plotter.TIME]
)
data = []
labels = []
for label, (headers, stats) in self.__category_stats.items():
data.append((stats[:, headers['timestamp']],
stats[:, headers['tasks_running']]))
labels.append(label)
self.plot(
data, 'Tasks running', 'tasks',
modes=[Plotter.PLOT | Plotter.TIME],
label=labels
)
# ----------
# Templating
# ----------
categories = [
c.name for c in self.config.categories if c.name != 'merge']
category_summary_data = []
def date2string(d):
return datetime.fromtimestamp(d).strftime('%a, %d %b %Y, %H:%M')
def istotal(s):
return s == "Total"
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), 'data')))
env.filters["datetime"] = date2string
env.tests["sum"] = istotal
overview = env.get_template('index.html')
wflow = env.get_template('category.html')
jsons = self.savejsons(units_processed)
shutil.copy(os.path.join(self.config.workdir, 'config.py'),
os.path.join(self.__plotdir, 'config.py'))
for fn in ['styles.css', 'gh.png']:
shutil.copy(os.path.join(os.path.dirname(__file__), 'data', fn),
os.path.join(self.__plotdir, fn))
# -----------------------
# Category specific plots
# -----------------------
logdir = os.path.join(self.__plotdir, 'logs')
if os.path.exists(logdir):
for dirpath, dirnames, filenames in os.walk(logdir):
logs = [os.path.join(dirpath, fn)
for fn in filenames if fn.endswith('.log')]
map(os.unlink, logs)
else:
os.makedirs(logdir)
outdir = os.path.join(self.__plotdir, 'all')
if not os.path.exists(outdir):
os.makedirs(outdir)
edges = self.make_master_plots('all', good_tasks, success_tasks)
logs = self.make_workflow_plots(
'all', edges, good_tasks, failed_tasks, success_tasks, merge_tasks, xmin, xmax)
labels = [w.label for w in self.config.workflows]
with open(os.path.join(self.__plotdir, 'all', 'index.html'), 'w') as f:
f.write(wflow.render(
id=self.config.label,
label='all workflows',
bad_tasks=len(failed_tasks) > 0,
good_tasks=len(success_tasks) > 0,
merge_tasks=len(merge_tasks) > 0,
summary=summary_data,
jsons=jsons,
bad_logs=logs,
bad_hosts=self.find_failure_hosts(failed_tasks),
foremen=foremen_names,
categories=categories,
transfers=self.merge_transfers(transfers, labels)
).encode('utf-8'))
def add_total(summaries):
numbers = zip(*[s[1:-2] for s in summaries])
total = map(sum, numbers)
total_mergeable = sum([s[-9] for s in summaries if
getattr(self.config.workflows, s[0], None) and
getattr(self.config.workflows, s[0]).merge_size > 0])
return summaries + \
[['Total'] + total + [
'{} %'.format(round(total[-6] * 100. / total[-7] if total[-7] > 0 else 0, 1)),
'{} %'.format(round(total[-5] * 100. / total_mergeable if total_mergeable > 0 else 0, 1))
]]
for category in self.config.categories:
label = category.name
if label == 'merge':
continue
ids = []
labels = []
for workflow in self.config.workflows:
if workflow.category == category:
ids.append(self.wflow_ids[workflow.label])
labels.append(workflow.label)
outdir = os.path.join(self.__plotdir, label)
if not os.path.exists(outdir):
os.makedirs(outdir)
wf_good_tasks = good_tasks[np.in1d(good_tasks['workflow'], ids)]
wf_failed_tasks = failed_tasks[
np.in1d(failed_tasks['workflow'], ids)]
wf_success_tasks = success_tasks[
np.in1d(success_tasks['workflow'], ids)]
wf_merge_tasks = merge_tasks[np.in1d(merge_tasks['workflow'], ids)]
self.make_master_plots(label, wf_good_tasks, wf_success_tasks)
logs = self.make_workflow_plots(label, edges,
wf_good_tasks,
wf_failed_tasks,
wf_success_tasks,
wf_merge_tasks,
xmin, xmax)
summary = add_total([xs for xs in summary_data if xs[0] in labels])
category_summary_data.append([label] + summary[-1][1:])
with open(os.path.join(self.__plotdir, label, 'index.html'), 'w') as f:
f.write(wflow.render(
id=self.config.label,
label=label,
bad_tasks=len(wf_failed_tasks) > 0,
good_tasks=len(wf_success_tasks) > 0,
merge_tasks=len(wf_merge_tasks) > 0,
summary=summary,
jsons=jsons,
bad_logs=logs,
bad_hosts=self.find_failure_hosts(wf_failed_tasks),
foremen=foremen_names,
categories=categories,
transfers=self.merge_transfers(transfers, labels)
).encode('utf-8'))
# Add the total from the unit store query
category_summary_data.append(summary_data[-1])
with open(os.path.join(self.__plotdir, 'index.html'), 'w') as f:
f.write(overview.render(
id=self.config.label,
plot_time=time.time(),
plot_starttime=self.__xmin,
plot_endtime=self.__xmax,
run_starttime=self.__total_xmin,
run_endtime=self.__total_xmax,
summary=category_summary_data,
bad_tasks=len(failed_tasks) > 0,
good_tasks=len(success_tasks) > 0,
foremen=foremen_names,
categories=categories
).encode('utf-8'))
p = multiprocessing.Pool(10, reset_signals)
p.map(mp_call, self.__plotargs)
p.close()
p.join()
class Plot(Command):
@property
def help(self):
return "plot progess of processing"
def setup(self, argparser):
argparser.add_argument("--from", default=None, metavar="START", dest="xmin",
help="plot data from START. Valid values: 1970-01-30, 1970-01-30_00:00, 00:00")
argparser.add_argument("--to", default=None, metavar="END", dest="xmax",
help="plot data until END. Valid values: 1970-01-30, 1970-01-30_00:00, 00:00")
argparser.add_argument("--foreman-logs", default=None, metavar="FOREMAN_LIST", dest="foreman_list", nargs='+', type=str,
help="specify log files for foremen; valid values: log1 log2 log3...logN")
argparser.add_argument('--paper', action='store_true', default=False,
help="use black & white style for publications")
argparser.add_argument('--outdir', help="specify output directory")
def run(self, args):
p = Plotter(args.config, args.outdir, args.paper)
p.make_plots(args.xmin, args.xmax, args.foreman_list)
| mit |
ethertricity/bluesky | bluesky/traffic/performance/nap/coeff.py | 1 | 5989 | ''' NAP performance library. '''
import os
import json
import warnings
import numpy as np
import pandas as pd
from bluesky import settings
settings.set_variable_defaults(perf_path_nap="data/performance/NAP")
# nap_path = os.path.dirname(os.path.realpath(__file__)) \
# + '/../../../../data/performance/NAP/'
ENG_TF = 1
ENG_TP = 2
ENG_PS = 3
db_aircraft = settings.perf_path_nap + "/aircraft.json"
db_engine = settings.perf_path_nap + "/engines.csv"
envelope_dir = settings.perf_path_nap + "/envelop/"
class Coefficient():
def __init__(self):
self.acs = self.__load_all_aircraft_flavor()
self.engines = pd.read_csv(db_engine, encoding='utf-8')
self.limits = self.__load_all_aircraft_envelop()
def __load_all_aircraft_flavor(self):
import warnings
warnings.simplefilter("ignore")
# read aircraft and engine files
allengines = pd.read_csv(db_engine, encoding='utf-8')
acs = json.load(open(db_aircraft, 'r'))
acs.pop('__comment')
for mdl, ac in acs.items():
acengines = ac['engines']
acs[mdl]['engines'] = {}
for e in acengines:
e = e.strip().upper()
selengine = allengines[allengines['name'].str.startswith(e)]
if selengine.shape[0] >= 1:
engine = json.loads(selengine.iloc[-1, :].to_json())
acs[mdl]['engines'][engine['name']] = engine
return acs
def __load_all_aircraft_envelop(self):
""" load aircraft envelop from the model database,
All unit in SI"""
limits = {}
for mdl, ac in self.acs.items():
fenv = envelope_dir + mdl.lower() + '.csv'
if os.path.exists(fenv):
df = pd.read_csv(fenv, index_col='param')
limits[mdl] = {}
limits[mdl]['vminto'] = df.loc['to_v_lof']['min']
limits[mdl]['vmaxto'] = df.loc['to_v_lof']['max']
limits[mdl]['vminic'] = df.loc['ic_va_avg']['min']
limits[mdl]['vmaxic'] = df.loc['ic_va_avg']['max']
limits[mdl]['vminer'] = min(df.loc['cl_v_cas_const']['min'],
df.loc['cr_v_cas_mean']['min'],
df.loc['de_v_cas_const']['min'])
limits[mdl]['vmaxer'] = min(df.loc['cl_v_cas_const']['max'],
df.loc['cr_v_cas_mean']['max'],
df.loc['de_v_cas_const']['max'])
limits[mdl]['vminap'] = df.loc['fa_va_avg']['min']
limits[mdl]['vmaxap'] = df.loc['fa_va_avg']['max']
limits[mdl]['vminld'] = df.loc['ld_v_app']['min']
limits[mdl]['vmaxld'] = df.loc['ld_v_app']['max']
limits[mdl]['vmo'] = limits[mdl]['vmaxer']
limits[mdl]['mmo'] = df.loc['cr_v_mach_max']['opt']
limits[mdl]['hmaxalt'] = df.loc['cr_h_max']['opt'] * 1000
limits[mdl]['crosscl'] = df.loc['cl_h_mach_const']['opt']
limits[mdl]['crossde'] = df.loc['de_h_cas_const']['opt']
limits[mdl]['amaxhoriz'] = df.loc['to_acc_tof']['max']
limits[mdl]['vsmax'] = max(df.loc['ic_vh_avg']['max'],
df.loc['cl_vh_avg_pre_cas']['max'],
df.loc['cl_vh_avg_cas_const']['max'],
df.loc['cl_vh_avg_mach_const']['max'])
limits[mdl]['vsmin'] = min(df.loc['ic_vh_avg']['min'],
df.loc['de_vh_avg_after_cas']['min'],
df.loc['de_vh_avg_cas_const']['min'],
df.loc['de_vh_avg_mach_const']['min'])
# limits['amaxverti'] = None # max vertical acceleration (m/s2)
return limits
def get_aircraft(self, mdl):
mdl = mdl.upper()
if mdl in self.acs:
return self.acs[mdl]
else:
raise RuntimeError('Aircraft data not found')
def get_engine(self, eng):
eng = eng.strip().upper()
selengine = self.engines[self.engines['name'].str.startswith(eng)]
if selengine.shape[0] == 0:
raise RuntimeError('Engine data not found')
if selengine.shape[0] > 1:
warnings.warn('Multiple engines data found, last one returned. \n\
matching engines are: %s' % selengine.name.tolist())
return json.loads(selengine.iloc[-1, :].to_json())
def get_ac_default_engine(self, mdl):
ac = self.get_aircraft(mdl)
engnames = list(ac['engines'].keys())
eng = ac['engines'][engnames[0]]
return eng
def get_initial_values(self, actypes):
"""construct a matrix of initial parameters"""
actypes = np.array(actypes)
engtypes = {
'TF': ENG_TF,
'TP': ENG_TP,
'PS': ENG_PS,
}
n = len(actypes)
params = np.zeros((n, 7))
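# Column layout of params (one row per entry in actypes), taken from the
# assignments below: 0 'wa' (likely wing area), 1 'oew' (operating empty
# weight), 2 'mtow', 3 'n_engines', 4 engine-type code (ENG_TF/ENG_TP/ENG_PS),
# 5 default engine 'thr' (max thrust), 6 default engine 'bpr' (bypass ratio).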
unique_ac_mdls = np.unique(actypes)
for mdl in unique_ac_mdls:
allengs = list(self.acs[mdl]['engines'].keys())
params[:, 0] = np.where(actypes==mdl, self.acs[mdl]['wa'], params[:, 0])
params[:, 1] = np.where(actypes==mdl, self.acs[mdl]['oew'], params[:, 1])
params[:, 2] = np.where(actypes==mdl, self.acs[mdl]['mtow'], params[:, 2])
params[:, 3] = np.where(actypes==mdl, self.acs[mdl]['n_engines'], params[:, 3])
params[:, 4] = np.where(actypes==mdl, engtypes[self.acs[mdl]['engine_type']], params[:, 4])
params[:, 5] = np.where(actypes==mdl, self.acs[mdl]['engines'][allengs[0]]['thr'], params[:, 5])
params[:, 6] = np.where(actypes==mdl, self.acs[mdl]['engines'][allengs[0]]['bpr'], params[:, 6])
return params
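# Minimal usage sketch (illustrative only; the aircraft type strings are
# assumptions and must exist in data/performance/NAP/aircraft.json):
#
#   coeff = Coefficient()
#   ac = coeff.get_aircraft('A320')              # aircraft flavour dictionary
#   eng = coeff.get_ac_default_engine('A320')    # first engine listed for the type
#   params = coeff.get_initial_values(['A320', 'A320', 'B738'])
#   # params.shape == (3, 7), one row of initial parameters per aircraft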
| gpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/gaussian_process/gpc.py | 13 | 31632 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
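# Sketch of how LAMBDAS/COEFS can be re-derived following the recipe in the
# comment above (kept as a comment so importing this module is unchanged;
# approximate values only):
#
#   from scipy.special import expit
#   x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])[:, np.newaxis]
#   b = expit(x).ravel()                        # logistic(x)
#   A = (erf(x * LAMBDAS.ravel()) + 1) / 2      # one column per lambda
#   coefs = np.linalg.lstsq(A, b, rcond=None)[0]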
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
"Gaussian Processes for Machine Learning" (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
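# Illustrative sketch of an externally defined optimizer honouring the
# documented ``optimizer`` callable signature (comment only; any callable with
# this contract can be passed instead of "fmin_l_bfgs_b"):
#
#   from scipy.optimize import minimize
#
#   def my_optimizer(obj_func, initial_theta, bounds):
#       res = minimize(lambda th: obj_func(th, eval_gradient=False),
#                      initial_theta, bounds=bounds, method="L-BFGS-B")
#       return res.x, res.fun
#
#   clf = GaussianProcessClassifier(optimizer=my_optimizer)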
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class : string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
.. versionadded:: 0.18
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
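# Minimal end-to-end sketch (illustrative only, not executed at import time):
#
#   from sklearn.datasets import load_iris
#   X, y = load_iris(return_X_y=True)
#   gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0)).fit(X, y)
#   print(gpc.predict(X[:2]))
#   print(gpc.predict_proba(X[:2]))
#   print(gpc.log_marginal_likelihood_value_)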
| mit |
dmnfarrell/epitopepredict | epitopepredict/config.py | 2 | 5239 | #!/usr/bin/env python
"""
epitopepredict config
Created March 2016
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, os, string, time
import types, re, subprocess, glob, shutil
from collections import OrderedDict
import pandas as pd
try:
import configparser
except:
import ConfigParser as configparser
path = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(path, 'data')
home = os.path.expanduser("~")
config_path = os.path.join(home, '.config/epitopepredict')
baseoptions = OrderedDict()
baseoptions['base'] = {'predictors': 'tepitope',
'mhc2_alleles':'HLA-DRB1*01:01,HLA-DRB1*04:01',
'mhc1_alleles':'HLA-A*01:01',
'mhc1_length': 11,
'mhc2_length': 11,
'n': 2, #number of alleles
'cutoff_method': 'default',
'cutoffs': .95, #percentile cutoff
'sequence_file':'', #genbank/fasta file
'peptide_file':'', #plain text list of peptides
'path': 'results',
'overwrite': 'no',
'verbose':'no',
'names': '', #subset of protein names
'threads': 1,
'compression': '',
'fasta_header_sep': ' '}
baseoptions['iedbtools'] = {'iedbmhc1_path':'', 'iedbmhc2_path':'',
'iedb_mhc1_method':'IEDB_recommended',
'iedb_mhc2_method':'IEDB_recommended'}
baseoptions['neopredict'] = {'vcf_files':'', 'ensembl_release':'75',
'selection_method':'promiscuity'}
def write_default_config():
"""Write a default config to users .config folder. Used to add global settings."""
fname = os.path.join(config_path, 'default.conf')
os.makedirs(config_path, exist_ok=True)
if not os.path.exists(fname):
write_config(conffile=fname, defaults=baseoptions)
return fname
def write_config(conffile='default.conf', defaults={}):
"""Write a default config file"""
if not os.path.exists(conffile):
cp = create_config_parser_from_dict(defaults, ['base','iedbtools'])
with open(conffile, 'w') as f:
    cp.write(f)
print ('wrote config file %s' %conffile)
return conffile
def create_config_parser_from_dict(data=None, sections=['base','iedbtools'], **kwargs):
"""Helper method to create a ConfigParser from a dict of the form shown in
baseoptions"""
if data is None:
data = baseoptions
#print (data)
cp = configparser.ConfigParser()
for s in sections:
cp.add_section(s)
if not s in data:
continue
for name in sorted(data[s]):
val = data[s][name]
if type(val) is list:
val = ','.join(val)
cp.set(s, name, str(val))
#use kwargs to create specific settings in the appropriate section
for s in cp.sections():
opts = cp.options(s)
for k in kwargs:
if k in opts:
cp.set(s, k, kwargs[k])
return cp
def parse_config(conffile=None):
"""Parse a configparser file"""
f = open(conffile,'r')
cp = configparser.ConfigParser()
try:
cp.read(conffile)
except Exception as e:
print ('failed to read config file! check format')
print ('Error returned:', e)
return
f.close()
return cp
def get_options(cp):
"""Makes sure boolean opts are parsed"""
from collections import OrderedDict
options = OrderedDict()
#options = cp._sections['base']
for section in cp.sections():
options.update( (cp._sections[section]) )
for o in options:
for section in cp.sections():
try:
options[o] = cp.getboolean(section, o)
except:
pass
try:
options[o] = cp.getint(section, o)
except:
pass
return options
def print_options(options):
"""Print option key/value pairs"""
for key in options:
print (key, ':', options[key])
print ()
def check_options(opts):
"""Check for missing default options in dict. Meant to handle
incomplete config files"""
sections = list(baseoptions.keys())
for s in sections:
defaults = dict(baseoptions[s])
for i in defaults:
if i not in opts:
opts[i] = defaults[i]
return opts
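# Typical flow, illustrative sketch using only the helpers defined above:
#
#   fname = write_default_config()        # ~/.config/epitopepredict/default.conf
#   cp = parse_config(fname)
#   options = check_options(get_options(cp))
#   print_options(options)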
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/reshape/test_reshape.py | 1 | 25038 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
import numpy as np
from numpy import nan
import pytest
from pandas.compat import u
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
from pandas.core.sparse.api import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, {}]'.format(
self.effective_dtype(dtype).name,
fill_value
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.DataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
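# Quick reference for the core behaviour exercised above (mirrors test_basic):
# pd.get_dummies(list('abc')) returns a uint8 frame
#
#      a  b  c
#   0  1  0  0
#   1  0  1  0
#   2  0  0  1
#
# and with sparse=True each column is wrapped in a SparseArray (fill_value 0).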
class TestCategoricalReshape(object):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_reshaping_panel_categorical(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
codes=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
Midafi/scikit-image | skimage/viewer/plugins/overlayplugin.py | 40 | 3615 | from warnings import warn
from ...util.dtype import dtype_range
from .base import Plugin
from ..utils import ClearColormap, update_axes_image
import six
from ..._shared.version_requirements import is_installed
__all__ = ['OverlayPlugin']
class OverlayPlugin(Plugin):
"""Plugin for ImageViewer that displays an overlay on top of main image.
The base Plugin class displays the filtered image directly on the viewer.
OverlayPlugin will instead overlay an image with a transparent colormap.
See base Plugin class for additional details.
Attributes
----------
overlay : array
Overlay displayed on top of image. This overlay defaults to a color map
with alpha values varying linearly from 0 to 1.
color : int
Color of overlay.
"""
colors = {'red': (1, 0, 0),
'yellow': (1, 1, 0),
'green': (0, 1, 0),
'cyan': (0, 1, 1)}
def __init__(self, **kwargs):
if not is_installed('matplotlib', '>=1.2'):
msg = "Matplotlib >= 1.2 required for OverlayPlugin."
warn(RuntimeWarning(msg))
super(OverlayPlugin, self).__init__(**kwargs)
self._overlay_plot = None
self._overlay = None
self.cmap = None
self.color_names = sorted(list(self.colors.keys()))
def attach(self, image_viewer):
super(OverlayPlugin, self).attach(image_viewer)
#TODO: `color` doesn't update GUI widget when set manually.
self.color = 0
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
ax = self.image_viewer.ax
if image is None:
ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
vmin, vmax = dtype_range[image.dtype.type]
self._overlay_plot = ax.imshow(image, cmap=self.cmap,
vmin=vmin, vmax=vmax)
else:
update_axes_image(self._overlay_plot, image)
if self.image_viewer.useblit:
self.image_viewer._blit_manager.background = None
self.image_viewer.redraw()
@property
def color(self):
return self._color
@color.setter
def color(self, index):
# Update colormap whenever color is changed.
if isinstance(index, six.string_types):
    if index not in self.color_names:
        raise ValueError("%s not defined in OverlayPlugin.colors" % index)
    name = index
else:
    name = self.color_names[index]
self._color = name
rgb = self.colors[name]
self.cmap = ClearColormap(rgb)
if self._overlay_plot is not None:
self._overlay_plot.set_cmap(self.cmap)
self.image_viewer.redraw()
@property
def filtered_image(self):
"""Return filtered image.
This "filtered image" is used when saving from the plugin.
"""
return self.overlay
def display_filtered_image(self, image):
"""Display filtered image as an overlay on top of image in viewer."""
self.overlay = image
def closeEvent(self, event):
# clear overlay from ImageViewer on close
self.overlay = None
super(OverlayPlugin, self).closeEvent(event)
def output(self):
"""Return the overlaid image.
Returns
-------
overlay : array, same shape as image
The overlay currently displayed.
data : None
"""
return (self.overlay, None)
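# Illustrative usage sketch (the viewer wiring below is an assumption and needs
# the optional Qt backend that skimage.viewer relies on):
#
#   from skimage import data, filters
#   from skimage.viewer import ImageViewer
#
#   image = data.coins()
#   viewer = ImageViewer(image)
#   plugin = OverlayPlugin(image_filter=lambda img: img > filters.threshold_otsu(img))
#   viewer += plugin
#   viewer.show()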
| bsd-3-clause |
dmitru/rankpy | rankpy/analysis/lambdamart.py | 1 | 3256 | # This file is part of RankPy.
#
# RankPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RankPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with RankPy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.ticker import MaxNLocator
def plot_lambdas_andrews_curves(lambdas, relevance_scores):
columns = ['Tree %d' % i for i in range(1, 1 + lambdas.shape[0])]
columns.append('Relevance')
data = pd.DataFrame(np.r_[lambdas, relevance_scores.reshape(1, -1).astype(int)].T, columns=columns)
pd.tools.plotting.andrews_curves(data, 'Relevance')
handles, labels = plt.gca().get_legend_handles_labels()
plt.gca().legend(handles, map(lambda s: 'Relevance ' + s, labels))
def plot_lambdas_parallel_coordinates(lambdas, relevance_scores, individual=False, cumulative=False):
unique_scores = sorted(np.unique(relevance_scores).astype(int), reverse=True)
colors = ['r', 'g', 'b', 'y', 'c', 'm', 'y', 'k']
if not individual:
plt.figure()
legend_handles = []
legend_labels = []
for c, r in enumerate(unique_scores):
legend_handles.append(mlines.Line2D([], [], color=colors[c], linewidth=2))
legend_labels.append('Relevance %d' % r)
if cumulative:
lambdas_cumsum = lambdas.cumsum(axis=0)
ymin, ymax = lambdas_cumsum.min(), lambdas_cumsum.max()
else:
ymin, ymax = lambdas.min(), lambdas.max()
for c, r in enumerate(unique_scores):
if individual:
plt.figure()
if cumulative:
plt.plot(lambdas[:, relevance_scores == r].cumsum(axis=0), '-', marker='.', markersize=1, c=colors[c], alpha=0.4)
else:
plt.plot(lambdas[:, relevance_scores == r], '-', marker='.', markersize=1, c=colors[c], alpha=0.4)
if individual:
plt.gca().get_xaxis().set_major_locator(MaxNLocator(integer=True))
plt.gca().set_ylim([ymin, ymax])
plt.title('Parallel Coordinates for%sLambdas (Relevance %d)' % (' Cumulative ' if cumulative else ' ', r))
plt.xlabel('Trees')
plt.ylabel('Cumulative Lambda Values' if cumulative else 'Lambda Values')
plt.show()
if not individual:
plt.gca().get_yaxis().set_major_locator(MaxNLocator(integer=True))
plt.gca().set_ylim([ymin, ymax])
plt.title('Parallel Coordinates for%sLambdas' % (' Cumulative ' if cumulative else ' '))
plt.xlabel('Trees')
plt.ylabel('Cumulative Lambda Values' if cumulative else 'Lambda Values')
plt.legend(legend_handles, legend_labels, loc='best')
plt.show()
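# Note on expected input shapes for both plotting helpers above (inferred from
# the indexing, not stated explicitly in the original code): ``lambdas`` is an
# array of shape (n_trees, n_documents) and ``relevance_scores`` has shape
# (n_documents,), one relevance label per document.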
| gpl-3.0 |
Sentient07/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
OceanPARCELS/parcels | parcels/plotting.py | 1 | 15740 | from datetime import datetime
from datetime import timedelta as delta
import numpy as np
from parcels.field import Field
from parcels.field import VectorField
from parcels.grid import CurvilinearGrid
from parcels.grid import GridCode
from parcels.tools.statuscodes import TimeExtrapolationError
from parcels.tools.loggers import logger
def plotparticles(particles, with_particles=True, show_time=None, field=None, domain=None, projection=None,
land=True, vmin=None, vmax=None, savefile=None, animation=False, **kwargs):
"""Function to plot a Parcels ParticleSet
:param show_time: Time at which to show the ParticleSet
:param with_particles: Boolean whether particles are also plotted on Field
:param field: Field to plot under particles (either None, a Field object, or 'vector')
:param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
:param projection: type of cartopy projection to use (default PlateCarree)
:param land: Boolean whether to show land. This is ignored for flat meshes
:param vmin: minimum colour scale (only in single-plot mode)
:param vmax: maximum colour scale (only in single-plot mode)
:param savefile: Name of a file to save the plot to
:param animation: Boolean whether result is a single plot, or an animation
"""
show_time = particles[0].time if show_time is None else show_time
if isinstance(show_time, datetime):
show_time = np.datetime64(show_time)
if isinstance(show_time, np.datetime64):
if not particles.time_origin:
raise NotImplementedError(
'If fieldset.time_origin is not a date, showtime cannot be a date in particleset.show()')
show_time = particles.time_origin.reltime(show_time)
if isinstance(show_time, delta):
show_time = show_time.total_seconds()
if np.isnan(show_time):
show_time, _ = particles.fieldset.gridset.dimrange('time_full')
if field is None:
spherical = True if particles.fieldset.U.grid.mesh == 'spherical' else False
plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection, cartopy_features=kwargs.pop('cartopy_features', []))
if plt is None:
return # creating axes was not possible
ax.set_title('Particles' + parsetimestr(particles.fieldset.U.grid.time_origin, show_time))
latN, latS, lonE, lonW = parsedomain(domain, particles.fieldset.U)
if cartopy is None or projection is None:
if domain is not None:
if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
ax.set_xlim(particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE])
ax.set_ylim(particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE])
else:
ax.set_xlim(particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE])
ax.set_ylim(particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN])
else:
ax.set_xlim(np.nanmin(particles.fieldset.U.grid.lon), np.nanmax(particles.fieldset.U.grid.lon))
ax.set_ylim(np.nanmin(particles.fieldset.U.grid.lat), np.nanmax(particles.fieldset.U.grid.lat))
elif domain is not None:
if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
ax.set_extent([particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE],
particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE]])
else:
ax.set_extent([particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE],
particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN]])
else:
if field == 'vector':
field = particles.fieldset.UV
elif not isinstance(field, Field):
field = getattr(particles.fieldset, field)
depth_level = kwargs.pop('depth_level', 0)
plt, fig, ax, cartopy = plotfield(field=field, animation=animation, show_time=show_time, domain=domain,
projection=projection, land=land, vmin=vmin, vmax=vmax, savefile=None,
titlestr='Particles and ', depth_level=depth_level, **kwargs)
if plt is None:
return # creating axes was not possible
if with_particles:
plon = np.array([p.lon for p in particles])
plat = np.array([p.lat for p in particles])
if cartopy:
ax.scatter(plon, plat, s=20, color='black', zorder=20, transform=cartopy.crs.PlateCarree())
else:
ax.scatter(plon, plat, s=20, color='black', zorder=20)
if animation:
plt.draw()
plt.pause(0.0001)
elif savefile is None:
plt.show()
else:
plt.savefig(savefile)
logger.info('Plot saved to ' + savefile + '.png')
plt.close()
def plotfield(field, show_time=None, domain=None, depth_level=0, projection=None, land=True,
vmin=None, vmax=None, savefile=None, **kwargs):
"""Function to plot a Parcels Field
:param show_time: Time at which to show the Field
:param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
:param depth_level: depth level to be plotted (default 0)
:param projection: type of cartopy projection to use (default PlateCarree)
:param land: Boolean whether to show land. This is ignored for flat meshes
:param vmin: minimum colour scale (only in single-plot mode)
:param vmax: maximum colour scale (only in single-plot mode)
:param savefile: Name of a file to save the plot to
:param animation: Boolean whether result is a single plot, or an animation
"""
if type(field) is VectorField:
spherical = True if field.U.grid.mesh == 'spherical' else False
field = [field.U, field.V]
plottype = 'vector'
elif type(field) is Field:
spherical = True if field.grid.mesh == 'spherical' else False
field = [field]
plottype = 'scalar'
else:
raise RuntimeError('field needs to be a Field or VectorField object')
if field[0].grid.gtype in [GridCode.CurvilinearZGrid, GridCode.CurvilinearSGrid]:
logger.warning('Field.show() does not always correctly determine the domain for curvilinear grids. '
'Use plotting with caution and perhaps use domain argument as in the NEMO 3D tutorial')
plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection=projection, cartopy_features=kwargs.pop('cartopy_features', []))
if plt is None:
return None, None, None, None # creating axes was not possible
data = {}
plotlon = {}
plotlat = {}
for i, fld in enumerate(field):
show_time = fld.grid.time[0] if show_time is None else show_time
if fld.grid.defer_load:
fld.fieldset.computeTimeChunk(show_time, 1)
(idx, periods) = fld.time_index(show_time)
show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
raise TimeExtrapolationError(show_time, field=fld, msg='show_time')
latN, latS, lonE, lonW = parsedomain(domain, fld)
if isinstance(fld.grid, CurvilinearGrid):
plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
else:
plotlon[i] = fld.grid.lon[lonW:lonE]
plotlat[i] = fld.grid.lat[latS:latN]
if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
raise RuntimeError('VectorField needs to be on an A-grid for plotting')
if fld.grid.time.size > 1:
if fld.grid.zdim > 1:
data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[depth_level, latS:latN, lonW:lonE]
else:
data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[latS:latN, lonW:lonE]
else:
if fld.grid.zdim > 1:
data[i] = np.squeeze(fld.data)[depth_level, latS:latN, lonW:lonE]
else:
data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]
if plottype == 'vector':
if field[0].interp_method == 'cgrid_velocity':
logger.warning_once('Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy')
d = np.empty_like(data[0])
d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
d[-1, :] = data[0][-1, :]
data[0] = d
d = np.empty_like(data[0])
d[:, :-1] = (data[0][:, :-1] + data[0][:, 1:]) / 2.
d[:, -1] = data[0][:, -1]
data[1] = d
spd = data[0] ** 2 + data[1] ** 2
speed = np.where(spd > 0, np.sqrt(spd), 0)
vmin = speed.min() if vmin is None else vmin
vmax = speed.max() if vmax is None else vmax
if isinstance(field[0].grid, CurvilinearGrid):
x, y = plotlon[0], plotlat[0]
else:
x, y = np.meshgrid(plotlon[0], plotlat[0])
u = np.where(speed > 0., data[0]/speed, 0)
v = np.where(speed > 0., data[1]/speed, 0)
if cartopy:
cs = ax.quiver(np.asarray(x), np.asarray(y), np.asarray(u), np.asarray(v), speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50, transform=cartopy.crs.PlateCarree())
else:
cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50)
else:
vmin = data[0].min() if vmin is None else vmin
vmax = data[0].max() if vmax is None else vmax
assert len(data[0].shape) == 2
if field[0].interp_method == 'cgrid_tracer':
d = data[0][1:, 1:]
elif field[0].interp_method == 'cgrid_velocity':
if field[0].fieldtype == 'U':
d = np.empty_like(data[0])
d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
elif field[0].fieldtype == 'V':
d = np.empty_like(data[0])
d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
else: # W
d = data[0][1:, 1:]
else: # if A-grid
d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] + data[0][1:, 1:])/4.
d = np.where(data[0][:-1, :-1] == 0, 0, d)
d = np.where(data[0][1:, :-1] == 0, 0, d)
d = np.where(data[0][1:, 1:] == 0, 0, d)
d = np.where(data[0][:-1, 1:] == 0, 0, d)
if cartopy:
cs = ax.pcolormesh(plotlon[0], plotlat[0], d, transform=cartopy.crs.PlateCarree())
else:
cs = ax.pcolormesh(plotlon[0], plotlat[0], d)
if cartopy is None:
ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
elif domain is not None:
ax.set_extent([np.nanmin(plotlon[0]), np.nanmax(plotlon[0]), np.nanmin(plotlat[0]), np.nanmax(plotlat[0])], crs=cartopy.crs.PlateCarree())
cs.cmap.set_over('k')
cs.cmap.set_under('w')
cs.set_clim(vmin, vmax)
cartopy_colorbar(cs, plt, fig, ax)
timestr = parsetimestr(field[0].grid.time_origin, show_time)
titlestr = kwargs.pop('titlestr', '')
if field[0].grid.zdim > 1:
if field[0].grid.gtype in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
gphrase = 'depth'
depth_or_level = field[0].grid.depth[depth_level]
else:
gphrase = 'level'
depth_or_level = depth_level
depthstr = ' at %s %g ' % (gphrase, depth_or_level)
else:
depthstr = ''
if plottype == 'vector':
ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
else:
ax.set_title(titlestr + field[0].name + depthstr + timestr)
if not spherical:
ax.set_xlabel('Zonal distance [m]')
ax.set_ylabel('Meridional distance [m]')
plt.draw()
if savefile:
plt.savefig(savefile)
logger.info('Plot saved to ' + savefile + '.png')
plt.close()
return plt, fig, ax, cartopy
def create_parcelsfig_axis(spherical, land=True, projection=None, central_longitude=0, cartopy_features=[]):
try:
import matplotlib.pyplot as plt
except:
logger.info("Visualisation is not possible. Matplotlib not found.")
return None, None, None, None # creating axes was not possible
if projection is not None and not spherical:
raise RuntimeError("projection not accepted when Field doesn't have geographic coordinates")
if spherical:
try:
import cartopy
except:
logger.info("Visualisation of field with geographic coordinates is not possible. Cartopy not found.")
return None, None, None, None # creating axes was not possible
projection = cartopy.crs.PlateCarree(central_longitude) if projection is None else projection
fig, ax = plt.subplots(1, 1, subplot_kw={'projection': projection})
try: # gridlines not supported for all projections
if isinstance(projection, cartopy.crs.PlateCarree) and central_longitude != 0:
gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True) # central_lon=0 necessary for correct xlabels
else:
gl = ax.gridlines(crs=projection, draw_labels=True)
gl.xlabels_top, gl.ylabels_right = (False, False)
gl.xformatter = cartopy.mpl.gridliner.LONGITUDE_FORMATTER
gl.yformatter = cartopy.mpl.gridliner.LATITUDE_FORMATTER
except:
pass
for feature in cartopy_features:
ax.add_feature(feature)
if isinstance(land, str):
ax.coastlines(land)
elif land:
ax.coastlines()
else:
cartopy = None
fig, ax = plt.subplots(1, 1)
ax.grid()
return plt, fig, ax, cartopy
def parsedomain(domain, field):
field.grid.check_zonal_periodic()
if domain is not None:
if not isinstance(domain, dict) and len(domain) == 4: # for backward compatibility with <v2.0.0
domain = {'N': domain[0], 'S': domain[1], 'E': domain[2], 'W': domain[3]}
_, _, _, lonW, latS, _ = field.search_indices(domain['W'], domain['S'], 0, 0, 0, search2D=True)
_, _, _, lonE, latN, _ = field.search_indices(domain['E'], domain['N'], 0, 0, 0, search2D=True)
return latN+1, latS, lonE+1, lonW
else:
if field.grid.gtype in [GridCode.RectilinearSGrid, GridCode.CurvilinearSGrid]:
return field.grid.lon.shape[0], 0, field.grid.lon.shape[1], 0
else:
return len(field.grid.lat), 0, len(field.grid.lon), 0
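# parsedomain returns index bounds in the order (latN, latS, lonE, lonW);
# callers such as plotfield and plotparticles then slice the grid with
# grid.lat[latS:latN] and grid.lon[lonW:lonE].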
def parsetimestr(time_origin, show_time):
if time_origin.calendar is None:
return ' after ' + str(delta(seconds=show_time)) + ' hours'
else:
date_str = str(time_origin.fulltime(show_time))
return ' on ' + date_str[:10] + ' ' + date_str[11:19]
def cartopy_colorbar(cs, plt, fig, ax):
cbar_ax = fig.add_axes([0, 0, 0.1, 0.1])
fig.subplots_adjust(hspace=0, wspace=0, top=0.925, left=0.1)
plt.colorbar(cs, cax=cbar_ax)
def resize_colorbar(event):
plt.draw()
posn = ax.get_position()
cbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0, 0.04, posn.height])
fig.canvas.mpl_connect('resize_event', resize_colorbar)
resize_colorbar(None)
| mit |
pymango/pymango | misc/python/mango/application/shale.py | 1 | 9724 | __doc__ = \
"""
======================================================================
Multi-modal shale CT imaging analysis (:mod:`mango.application.shale`)
======================================================================
.. currentmodule:: mango.application.shale
Analysis of shale dry, dry-after, Iodine-stained and Diiodomethane-stained CT images.
Functions
=========
.. autosummary::
:toctree: generated/
convertShaleHist2dToTernary - Converts 2D histogram data to ternary Histogram data.
resolveHistogramDuplicateEntries - Resolves duplicate/close ternary coordinates for triangulation.
generateShaleTernaryPlot - Generates ternary histogram plots from micro-porosity segmented data.
"""
from .io import readCsvHistData
from .plot import ternaryPlot
import numpy as np
import scipy as sp
import mango.mpi as mpi
logger, rootLogger = mpi.getLoggers(__name__)
class MicroPorosityBinToPercentMapper:
"""
Maps micro-porosity segmentation class values to percentage value.
"""
def __init__(self, bin0percent, bin100percent):
self.bin0percent = bin0percent
self.bin100percent = bin100percent
def __call__(self, binIdx):
if (binIdx <= self.bin0percent):
percentVal = 0.0
elif (binIdx >= self.bin100percent):
percentVal = 100.0
else:
percentVal = 100.0*(binIdx - self.bin0percent)/float((self.bin100percent-self.bin0percent))
return percentVal
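# A minimal usage sketch (hypothetical bin counts, not taken from the original
# data): convertShaleHist2dToTernary below builds, for e.g. 16 porosity bins,
# MicroPorosityBinToPercentMapper(1.0, 14.0), so that
#   mapper(0)  -> 0.0                      (at or below bin0percent)
#   mapper(14) -> 100.0                    (at or above bin100percent)
#   mapper(8)  -> 100.0 * (8 - 1) / 13.0   (~53.8, linear in between)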
INVALID_PROPORTION_RESCALE = 0
INVALID_PROPORTION_DISCARD = 1
def convertShaleHist2dToTernary(
histData,
invalidProportionMethod=INVALID_PROPORTION_RESCALE,
cropRange=None,
cropIndex=None
):
"""
Returns a :samp:`(N,4)` shaped :obj:`numpy.array` of ternary
*(mineral,pore,organic,frequency)* data. The input :samp:`histData`
is 2D histogram data generated from a pair of *micro-porosity* segmented
images. The x-axis data is assumed to be the CH2I2 differenced data.
:type histData: :obj:`mango.application.io.HistData`
:param histData: 2D histogram data of micro-porosity segmentation image pair
(micro-porosity segmentation of CH2I2-image minus dry-after-image image and
micro-porosity segmentation of I2-image minus dry-image). Assumes that
the CH2I2 data is the x-axis of the :samp:`histData`.
:type invalidProportionMethod: int
:param invalidProportionMethod: Method used to resolve data
points where :samp:`pore_percent+organic_percent` exceeds 100%.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(N,4)` shaped :obj:`numpy.ndarray`, where :samp:`N=num_x_bins*num_y_bins`.
Each row of the returned array is :samp:`(mineral-percent, pore-percent, organic-percent, count)`.
"""
numPorosityBins = histData.hist1dData0.size
numOrganicityBins = histData.hist1dData1.size
pMapper = MicroPorosityBinToPercentMapper(1.0, numPorosityBins-2.0)
oMapper = MicroPorosityBinToPercentMapper(1.0, numOrganicityBins-2.0)
#pMapper = MicroPorosityBinToPercentMapper(0.0, numPorosityBins-1.0)
#oMapper = MicroPorosityBinToPercentMapper(0.0, numOrganicityBins-1.0)
ternList = []
for pIdx in range(0, numPorosityBins):
for oIdx in range(0, numOrganicityBins):
porosity = 100.0 - pMapper(pIdx)
organicity = 100.0 - oMapper(oIdx)
if (porosity + organicity > 100.0):
if ((invalidProportionMethod == INVALID_PROPORTION_RESCALE)):
f = 100.0/(porosity + organicity + 1.0e-3)
porosity *= f
organicity *= f
elif (invalidProportionMethod == INVALID_PROPORTION_DISCARD):
continue
minerality = 100.0-porosity-organicity
valList = [minerality, porosity, organicity]
if (
((cropRange == None) or (cropIndex == None))
or
((valList[cropIndex] >= cropRange[0]) and (valList[cropIndex] <= cropRange[1]))
):
if ((cropRange != None) and (cropIndex!=None)):
valList[cropIndex] = 100.0*(valList[cropIndex] - cropRange[0])/(cropRange[1]-cropRange[0])
minerality, porosity, organicity = valList
ternList.append([minerality, porosity, organicity, histData.hist2dData[oIdx, pIdx]])
return sp.array(ternList, dtype="float64")
def resolveHistogramDuplicateEntries(ternaryArray, tol=1.0e-4):
"""
Remove duplicate/close coordinates in the :samp:`(N,4)` shaped :samp:`ternaryArray`
histogram.
"""
numCoords = ternaryArray.shape[0]
msk = sp.ones((numCoords,), dtype="bool")
coordArray = ternaryArray[:, 0:3]
nonDupList = []
for i in range(0, numCoords):
if (msk[i]):
coord = coordArray[i,:]
d = coordArray - coord
d = sp.sqrt(sp.sum(d*d, axis=1))
nearCoordIdxs = sp.where(sp.logical_and(d < tol, msk))
nonDupList.append(coord.tolist() + [sp.sum(ternaryArray[:,-1][nearCoordIdxs]),])
msk[nearCoordIdxs] = False
return sp.array(nonDupList, dtype=ternaryArray.dtype)
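# A minimal sketch of how the two helpers above compose (``hist2d`` is a
# hypothetical HistData instance; the variable names are illustrative only):
#
#   tern = convertShaleHist2dToTernary(hist2d, INVALID_PROPORTION_RESCALE)
#   tern = resolveHistogramDuplicateEntries(tern, tol=1.0e-3)
#   # tern is now an (N, 4) array of (mineral%, pore%, organic%, count) rows
#   # with near-duplicate coordinates merged, ready for triangulation.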
def generateShaleTernaryPlot(
histData,
cropRange = None,
cropIndex = None,
invalidProportionMethodList=[INVALID_PROPORTION_RESCALE,INVALID_PROPORTION_DISCARD],
doLogScale = True,
cmap = None,
contourNumLevels = 32,
contourNumLines = None,
doContourColourBar = False,
shading='gouraud'
):
"""
Returns a list of (:obj:`matplotlib.figure.Figure`, :obj:`str`) pairs with ternary
*mineral-pore-organic* 2D histogram plots.
:type histData: :obj:`mango.application.io.HistData`
:param histData: 2D histogram data of micro-porosity segmentation image pair
(micro-porosity segmentation of CH2I2-image minus dry-after-image image and
micro-porosity segmentation of I2-image minus dry-image). Assumes that
the CH2I2 data is the x-axis of the :samp:`histData`.
:rtype: :obj:`list` of pairs
:return: List of (:obj:`matplotlib.figure.Figure`, :obj:`str`) pairs.
"""
import matplotlib.pyplot as plt
if (cmap == None):
cmap = plt.cm.get_cmap("gray_r")
figList = []
origCountSum = sp.sum(histData.hist2dData)
labels=("mineral", "pore", "organic")
for invalidProportionMethod in invalidProportionMethodList:
ternaryArray = convertShaleHist2dToTernary(histData, invalidProportionMethod, cropRange=cropRange, cropIndex=cropIndex)
ternaryArray = resolveHistogramDuplicateEntries(ternaryArray, tol=0.9990)
countSum = sp.sum(ternaryArray[:,-1])
percentCountsDiscarded = 100.0*(origCountSum-countSum)/float(origCountSum)
titleOffset = 1.08
fontSize = "small"
if (invalidProportionMethod == INVALID_PROPORTION_DISCARD):
invalidProportionMethodStr = "discard"
titleStr = "Percent Counts Discarded = %g%%" % percentCountsDiscarded
else:
invalidProportionMethodStr = "rescale"
titleStr = "Rescaled Points"
if (cropIndex != None) and (cropRange != None):
titleStr += " (%s cropped to range [%s%%,%s%%])" % ((labels[cropIndex], ) + cropRange)
fontSize = "x-small"
logger.info(titleStr)
logger.debug("ternaryArray.shape=%s", (ternaryArray.shape,))
logger.debug("ternaryArray:\n")
logger.debug(str(ternaryArray))
logger.debug(
"ternaryArray (min-x,min-y,min-z)=(%s,%s,%s)"
%
(np.min(ternaryArray[:,0]), np.min(ternaryArray[:,1]), np.min(ternaryArray[:,2]))
)
logger.debug(
"ternaryArray (max-x,max-y,max-z)=(%s,%s,%s)"
%
(np.max(ternaryArray[:,0]), np.max(ternaryArray[:,1]), np.max(ternaryArray[:,2]))
)
ternaryPlotData, ternAxes = ternaryPlot(ternaryArray[:, 0:3], labels=labels)
logger.debug("ternaryPlotData.shape=%s", (ternaryPlotData.shape,))
logger.debug("ternaryPlotData:\n")
logger.debug(str(ternaryPlotData))
ax, fig = ternAxes.createAxes()
ax.scatter(ternaryPlotData[:,0], ternaryPlotData[:,1])
figList.append((fig, "coords_%s" % invalidProportionMethodStr))
if (doLogScale):
ternaryArray[:,-1] = sp.log(1.0+ternaryArray[:,-1])
pass
ax, fig = ternAxes.createAxes()
ax.triplot(ternaryPlotData[:,0], ternaryPlotData[:,1])
figList.append((fig, "coords_triangulated_%s" % invalidProportionMethodStr))
ax, fig = ternAxes.createAxes()
ax.tripcolor(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1], shading=shading, cmap=cmap)
t = plt.title(titleStr, fontsize=fontSize)
t.set_y(titleOffset)
figList.append((fig, "ternary_triangulated_%s" % invalidProportionMethodStr))
ax, fig = ternAxes.createAxes()
if (contourNumLines == None):
contourNumLines = contourNumLevels//2
cs = ax.tricontourf(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1],contourNumLevels, cmap=cmap)
if (doContourColourBar):
fig.colorbar(cs, shrink=0.9)
contourPlt = ax.tricontour(ternaryPlotData[:,0], ternaryPlotData[:,1], ternaryArray[:,-1],contourNumLines, colors='k', linewidths=1)
t = plt.title(titleStr, fontsize=fontSize)
t.set_y(titleOffset)
figList.append((fig, "ternary_contour_triangulated_%s" % invalidProportionMethodStr))
return figList
__all__ = [s for s in dir() if not s.startswith('_')]
| bsd-2-clause |
B3AU/waveTree | sklearn/mixture/gmm.py | 5 | 27249 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state, deprecated
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
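# Shape sketch (hedged example, values are illustrative only):
#   X = np.random.randn(10, 2)
#   means = np.zeros((3, 2))
#   covars = np.ones((3, 2))            # one diagonal covariance per component
#   lpr = log_multivariate_normal_density(X, means, covars, 'diag')
#   assert lpr.shape == (10, 3)         # (n_samples, n_components)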
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covars : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
from scipy import linalg
U, s, V = linalg.svd(covar)
sqrtS = np.diag(np.sqrt(s))
sqrt_covar = np.dot(U, np.dot(sqrtS, V))
rand = np.dot(sqrt_covar, rand)
return (rand.T + mean).T
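# Hedged usage sketch (arguments follow the docstring above, values illustrative):
#   mean = np.array([0.0, 10.0])
#   covar = np.array([1.0, 2.0])        # 'diag' covariance
#   X = sample_gaussian(mean, covar, 'diag', n_samples=5, random_state=0)
#   assert X.shape == (2, 5)            # (n_features, n_samples)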
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
`weights_` : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`covars_` : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on `cvtype`::
(`n_states`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
@deprecated("GMM.eval was renamed to GMM.score_samples in 0.14 and will be"
" removed in 0.16.")
def eval(self, X):
return self.score_samples(X)
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X):
"""Estimate model parameters with the expectation-maximization
algorithm.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
## initialization step
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
# check that at least one initialization produced a valid (finite)
# likelihood before reporting the best parameters.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
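# In the M step above, weights_k is proportional to sum_n r_nk and
# means_k = (sum_n r_nk x_n) / (sum_n r_nk); the small EPS terms only guard
# against empty components, and the covariance update is delegated to the
# per-covariance-type helpers at the bottom of this module.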
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
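# For example, a 'full' model with n_components=2 and n_features=3 has
# 2 * 3 * 4 / 2 = 12 covariance, 2 * 3 = 6 mean and 2 - 1 = 1 free weight
# parameters, i.e. _n_parameters() == 19.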
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
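# The expression above is the expanded, vectorised form of the diagonal
# Gaussian log-density -0.5 * sum_d [log(2*pi*var_d) + (x_d - mu_d)**2 / var_d],
# evaluated for every sample/component pair at once.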
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this component
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
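# The spherical update reuses the diagonal M step and then replaces each
# component's per-feature variances by their mean across features, tiled back
# to shape (n_components, n_features).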
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian Distribution"
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
herilalaina/scikit-learn | examples/model_selection/plot_multi_metric_evaluation.py | 29 | 3755 | """
============================================================================
Demonstration of multi-metric evaluation on cross_val_score and GridSearchCV
============================================================================
Multiple metric parameter search can be done by setting the ``scoring``
parameter to a list of metric scorer names or a dict mapping the scorer names
to the scorer callables.
The scores of all the scorers are available in the ``cv_results_`` dict at keys
ending in ``'_<scorer_name>'`` (``'mean_test_precision'``,
``'rank_test_precision'``, etc...)
The ``best_estimator_``, ``best_index_``, ``best_score_`` and ``best_params_``
correspond to the scorer (key) that is set to the ``refit`` attribute.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# License: BSD
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_hastie_10_2
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
print(__doc__)
###############################################################################
# Running ``GridSearchCV`` using multiple evaluation metrics
# ----------------------------------------------------------
#
X, y = make_hastie_10_2(n_samples=8000, random_state=42)
# The scorers can either be one of the predefined metric strings or a scorer
# callable, like the one returned by make_scorer
scoring = {'AUC': 'roc_auc', 'Accuracy': make_scorer(accuracy_score)}
# Setting refit='AUC', refits an estimator on the whole dataset with the
# parameter setting that has the best cross-validated AUC score.
# That estimator is made available at ``gs.best_estimator_`` along with
# parameters like ``gs.best_score_``, ``gs.best_params_`` and
# ``gs.best_index_``
gs = GridSearchCV(DecisionTreeClassifier(random_state=42),
param_grid={'min_samples_split': range(2, 403, 10)},
scoring=scoring, cv=5, refit='AUC')
gs.fit(X, y)
results = gs.cv_results_
###############################################################################
# Plotting the result
# -------------------
plt.figure(figsize=(13, 13))
plt.title("GridSearchCV evaluating using multiple scorers simultaneously",
fontsize=16)
plt.xlabel("min_samples_split")
plt.ylabel("Score")
plt.grid()
ax = plt.axes()
ax.set_xlim(0, 402)
ax.set_ylim(0.73, 1)
# Get the regular numpy array from the MaskedArray
X_axis = np.array(results['param_min_samples_split'].data, dtype=float)
for scorer, color in zip(sorted(scoring), ['g', 'k']):
for sample, style in (('train', '--'), ('test', '-')):
sample_score_mean = results['mean_%s_%s' % (sample, scorer)]
sample_score_std = results['std_%s_%s' % (sample, scorer)]
ax.fill_between(X_axis, sample_score_mean - sample_score_std,
sample_score_mean + sample_score_std,
alpha=0.1 if sample == 'test' else 0, color=color)
ax.plot(X_axis, sample_score_mean, style, color=color,
alpha=1 if sample == 'test' else 0.7,
label="%s (%s)" % (scorer, sample))
best_index = np.nonzero(results['rank_test_%s' % scorer] == 1)[0][0]
best_score = results['mean_test_%s' % scorer][best_index]
# Plot a dotted vertical line at the best score for that scorer marked by x
ax.plot([X_axis[best_index], ] * 2, [0, best_score],
linestyle='-.', color=color, marker='x', markeredgewidth=3, ms=8)
# Annotate the best score for that scorer
ax.annotate("%0.2f" % best_score,
(X_axis[best_index], best_score + 0.005))
plt.legend(loc="best")
plt.grid('off')
plt.show()
| bsd-3-clause |
shusenl/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
rsivapr/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
vshtanko/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared exponential correlation model, the nugget is mathematically
equivalent to a normalized variance. That is,
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
weinbe58/QuSpin | docs/downloads/8ebdaf354c80ef927ecd6a3597c6b0f6/example5.py | 3 | 4756 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
#####################################################################
# example 5 #
# In this script we demonstrate how to use QuSpin's to build #
# the Hamiltonian of the SSH model in real and momentum space. #
# Along the way, we showcase the block tools which allow the #
# user to create block-diagonal Hamiltonians. Last, we show #
# how to time-evolve free fermion states like the Fermi sea #
# and measure correlators. #
#####################################################################
from quspin.operators import hamiltonian,exp_op # Hamiltonians and operators
from quspin.basis import spinless_fermion_basis_1d # Hilbert space fermion basis
from quspin.tools.block_tools import block_diag_hamiltonian # block diagonalisation
import numpy as np # generic math functions
import matplotlib.pyplot as plt # plotting library
try: # import python 3 zip function in python 2 and pass if already using python 3
import itertools.izip as zip
except ImportError:
pass
##### define model parameters #####
L=100 # system size
J=1.0 # uniform hopping
deltaJ=0.1 # bond dimerisation
Delta=0.5 # staggered potential
beta=100.0 # inverse temperature for Fermi-Dirac distribution
##### construct single-particle Hamiltonian #####
# define site-coupling lists
hop_pm=[[-J-deltaJ*(-1)**i,i,(i+1)%L] for i in range(L)] # PBC
hop_mp=[[+J+deltaJ*(-1)**i,i,(i+1)%L] for i in range(L)] # PBC
stagg_pot=[[Delta*(-1)**i,i] for i in range(L)]
# define static and dynamic lists
static=[["+-",hop_pm],["-+",hop_mp],['n',stagg_pot]]
dynamic=[]
# define basis
basis=spinless_fermion_basis_1d(L,Nf=1)
# build real-space Hamiltonian
H=hamiltonian(static,dynamic,basis=basis,dtype=np.float64)
# diagonalise real-space Hamiltonian
E,V=H.eigh()
##### compute Fourier transform and momentum-space Hamiltonian #####
# define momentum blocks and basis arguments
blocks=[dict(Nf=1,kblock=i,a=2) for i in range(L//2)] # only L//2 distinct momenta
basis_args = (L,)
# construct block-diagonal Hamiltonian
FT,Hblock = block_diag_hamiltonian(blocks,static,dynamic,spinless_fermion_basis_1d,
basis_args,np.complex128,get_proj_kwargs=dict(pcon=True))
# diagonalise momentum-space Hamiltonian
Eblock,Vblock=Hblock.eigh()
##### prepare the density observables and initial states #####
# grab single-particle states and treat them as initial states
psi0=Vblock
# construct operator n_1 = $n_{j=0}$
n_1_static=[['n',[[1.0,0]]]]
n_1=hamiltonian(n_1_static,[],basis=basis,dtype=np.float64,
check_herm=False,check_pcon=False)
# construct operator n_2 = $n_{j=L/2}$
n_2_static=[['n',[[1.0,L//2]]]]
n_2=hamiltonian(n_2_static,[],basis=basis,dtype=np.float64,
check_herm=False,check_pcon=False)
# transform n_j operators to momentum space
n_1=n_1.rotate_by(FT,generator=False)
n_2=n_2.rotate_by(FT,generator=False)
##### evaluate nonequal time correlator <FS|n_2(t) n_1(0)|FS> #####
# define time vector
t=np.linspace(0.0,90.0,901)
# calculate the state acted on by n_1
n_psi0=n_1.dot(psi0)
# construct time-evolution operator using exp_op class (sometimes faster)
U = exp_op(Hblock,a=-1j,start=t.min(),stop=t.max(),num=len(t),iterate=True)
# evolve states
psi_t=U.dot(psi0)
n_psi_t = U.dot(n_psi0)
# alternative method for time evolution using Hamiltonian class
#psi_t=Hblock.evolve(psi0,0.0,t,iterate=True)
#n_psi_t=Hblock.evolve(n_psi0,0.0,t,iterate=True)
# preallocate variable
correlators=np.zeros(t.shape+psi0.shape[1:])
# loop over the time-evolved states
for i, (psi,n_psi) in enumerate( zip(psi_t,n_psi_t) ):
correlators[i,:]=n_2.matrix_ele(psi,n_psi,diagonal=True).real
# evaluate correlator at finite temperature
n_FD=1.0/(np.exp(beta*E)+1.0)
correlator = (n_FD*correlators).sum(axis=-1)
##### plot spectra
plt.plot(np.arange(H.Ns),E/L,
marker='o',color='b',label='real space')
plt.plot(np.arange(Hblock.Ns),Eblock/L,
marker='x',color='r',markersize=2,label='momentum space')
plt.xlabel('state number',fontsize=16)
plt.ylabel('energy',fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend(fontsize=16)
plt.grid()
plt.tight_layout()
plt.savefig('example5a.pdf', bbox_inches='tight')
#plt.show()
plt.close()
##### plot correlator
plt.plot(t,correlator,linewidth=2)
plt.xlabel('$t$',fontsize=16)
plt.ylabel('$C_{0,L/2}(t,\\beta)$',fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid()
plt.tight_layout()
plt.savefig('example5b.pdf', bbox_inches='tight')
#plt.show()
plt.close()
| bsd-3-clause |
jwiggins/scikit-image | doc/examples/xx_applications/plot_morphology.py | 6 | 8329 | """
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
fig, ax = plt.subplots()
ax.imshow(phantom, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Let's also define a convenience function for plotting comparisons:
"""
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
"""
Erosion
=======
Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
pixels in the neighborhood centered at (i, j)*. The structuring element,
``selem``, passed to ``erosion`` is a boolean array that describes this
neighborhood. Below, we use ``disk`` to create a circular structuring element,
which we use for most of the following examples.
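For intuition, here is what a small structuring element looks like (a rough,
illustrative sketch; ``disk(1)`` is simply a 3x3 cross of ones)::
    >>> from skimage.morphology import disk
    >>> disk(1)
    array([[0, 1, 0],
           [1, 1, 1],
           [0, 1, 0]], dtype=uint8)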
"""
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(phantom, selem)
plot_comparison(phantom, eroded, 'erosion')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image disappears or gets eroded as we
increase the size of the disk. Also notice the increase in size of the two
black ellipses in the center and the disappearance of the 3 light grey
patches in the lower part of the image.
Dilation
========
Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
regions and shrinks dark regions.
"""
dilated = dilation(phantom, selem)
plot_comparison(phantom, dilated, 'dilation')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image thickens, or gets dilated, as we
increase the size of the disk. Also notice the decrease in size of the two
black ellipses in the centre, and the thickening of the light grey circle in
the center and the 3 patches in the lower part of the image.
Opening
=======
Morphological ``opening`` on an image is defined as an *erosion followed by a
dilation*. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks.
"""
opened = opening(phantom, selem)
plot_comparison(phantom, opened, 'opening')
"""
.. image:: PLOT2RST.current_figure
Since ``opening`` an image starts with an erosion operation, light regions that
are *smaller* than the structuring element are removed. The dilation operation
that follows ensures that light regions that are *larger* than the structuring
element retain their original size. Notice how the light and dark shapes in the
center retain their original thickness, but the 3 lighter patches in the bottom get
completely eroded. The size dependence is highlighted by the outer white ring:
The parts of the ring thinner than the structuring element were completely
erased, while the thicker region at the top retains its original thickness.
Closing
=======
Morphological ``closing`` on an image is defined as a *dilation followed by an
erosion*. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks.
To illustrate this more clearly, let's add a small crack to the white border:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
"""
.. image:: PLOT2RST.current_figure
Since ``closing`` an image starts with a dilation operation, dark regions
that are *smaller* than the structuring element are removed. The dilation
operation that follows ensures that dark regions that are *larger* than the
structuring element retain their original size. Notice how the white ellipses
at the bottom get connected because of dilation, but other dark regions retain
their original sizes. Also notice how the crack we added is mostly removed.
White tophat
============
The ``white_tophat`` of an image is defined as the *image minus its
morphological opening*. This operation returns the bright spots of the image
that are smaller than the structuring element.
To make things interesting, we'll add bright and dark spots to the image:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide white square is highlighted since it is
smaller than the structuring element. Also, the thin, white edges around most
of the ellipse are retained because they're smaller than the structuring
element, but the thicker region at the top disappears.
Black tophat
============
The ``black_tophat`` of an image is defined as its morphological **closing
minus the original image**. This operation returns the *dark spots of the
image that are smaller than the structuring element*.
"""
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide black square is highlighted since it is
smaller than the structuring element.
Duality
-------
As you should have noticed, many of these operations are simply the reverse
of another operation. This duality can be summarized as follows:
1. Erosion <-> Dilation
2. Opening <-> Closing
3. White tophat <-> Black tophat
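As a quick numerical check of the first pair, the following rough sketch
(assuming the ``uint8`` image ``phantom`` and the symmetric ``selem`` defined
above, with ``255 - v`` as the complement of a pixel value ``v``) should pass::
    import numpy as np
    # dilating equals complementing, eroding, then complementing back
    assert np.array_equal(dilation(phantom, selem),
                          255 - erosion(255 - phantom, selem))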
Skeletonize
===========
Thinning is used to reduce each connected component in a binary image to a
*single-pixel wide skeleton*. It is important to note that this is performed
on binary images only.
"""
from skimage import img_as_bool
horse = ~img_as_bool(io.imread(data_dir+'/horse.png', as_grey=True))
sk = skeletonize(horse)
plot_comparison(horse, sk, 'skeletonize')
"""
.. image:: PLOT2RST.current_figure
As the name suggests, this technique is used to thin the image to a 1-pixel wide
skeleton by applying thinning successively.
Convex hull
===========
The ``convex_hull_image`` is the *set of pixels included in the smallest
convex polygon that surround all white pixels in the input image*. Again note
that this is also performed on binary images.
"""
hull1 = convex_hull_image(horse)
plot_comparison(horse, hull1, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
As the figure illustrates, ``convex_hull_image`` gives the smallest polygon
which covers all of the white (``True``) pixels in the image.
If we add a small grain to the image, we can see how the convex hull adapts to
enclose that grain:
"""
import numpy as np
horse2 = np.copy(horse)
horse2[45:50, 75:80] = 1
hull2 = convex_hull_image(horse2)
plot_comparison(horse2, hull2, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
Additional Resources
====================
1. `MathWorks tutorial on morphological processing
<http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-erosion.html>`_
2. `Auckland university's tutorial on Morphological Image Processing
<http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
3. http://en.wikipedia.org/wiki/Mathematical_morphology
"""
plt.show()
| bsd-3-clause |
linebp/pandas | doc/sphinxext/ipython_sphinxext/ipython_directive.py | 1 | 37844 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
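Putting several of these together, a minimal ``conf.py`` sketch might look like
the following (the specific values are illustrative, not required)::
    extensions = ['IPython.sphinxext.ipython_console_highlighting',
                  'IPython.sphinxext.ipython_directive']
    ipython_savefig_dir = '_static'
    ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    ipython_holdcount = True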
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
try:
from traitlets.config import Config
except ImportError:
from IPython import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
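# Small helper: a StringIO subclass that decodes any bytes written to it using
# a configurable list of candidate encodings (see its write() method below).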
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IPythonDirective.run()
# based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data block for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. It is unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed, capturing stderr and stdout, then returns
the content as a list as if it were ipython code.
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend and 'matplotlib.backends' not in sys.modules:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if self.state.document.current_source not in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
try:
self.shell.IP.prompt_manager.width = 0
except AttributeError:
# GH14003: class promptManager has removed after IPython 5.x
pass
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
DGrady/pandas | pandas/plotting/_style.py | 11 | 8329 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
from contextlib import contextmanager
import re
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import range, lrange, lmap
import pandas.compat as compat
from pandas.plotting._compat import _mpl_ge_2_0_0
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
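# Note on the helper below (a summary inferred from its code): an explicit
# `color` argument takes precedence over `colormap`; otherwise a named or
# callable `colormap` is used; failing both, colors come from matplotlib's
# prop_cycle/color_cycle ('default') or are generated per column ('random').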
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = list(color) if is_list_like(color) else color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
try:
colors = [c['color']
for c in list(plt.rcParams['axes.prop_cycle'])]
except KeyError:
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be convertable to single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be convertable to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
# Special case for single str 'CN' match and convert to hex
# for supporting matplotlib < 2.0.0
if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0():
hex_color = [c['color']
for c in list(plt.rcParams['axes.prop_cycle'])]
colors = [hex_color[int(colors[1])]]
else:
# this may no longer be required
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
try:
multiple = num_colors // len(colors) - 1
except ZeroDivisionError:
raise ValueError("Invalid color argument: ''")
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self, deprecated=False):
self._deprecated = deprecated
# self['xaxis.compat'] = False
super(_Options, self).__setitem__('xaxis.compat', False)
def _warn_if_deprecated(self):
if self._deprecated:
warnings.warn("'pandas.plot_params' is deprecated. Use "
"'pandas.plotting.plot_params' instead",
FutureWarning, stacklevel=3)
def __getitem__(self, key):
self._warn_if_deprecated()
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
self._warn_if_deprecated()
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self._warn_if_deprecated()
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
self._warn_if_deprecated()
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
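# Illustrative usage sketch for LinearSVC (not part of the original module;
# the toy dataset and the shown prediction are assumptions for demonstration):
# >>> import numpy as np
# >>> from sklearn.svm import LinearSVC
# >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
# >>> y = np.array([1, 1, 2, 2])
# >>> clf = LinearSVC().fit(X, y)
# >>> clf.predict([[-0.8, -1]])
# array([1])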
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVR does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
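# Illustrative usage sketch for LinearSVR (not part of the original module;
# the random data is an assumption for demonstration):
# >>> import numpy as np
# >>> from sklearn.svm import LinearSVR
# >>> np.random.seed(0)
# >>> X = np.random.randn(20, 5)
# >>> y = X[:, 0] + 0.1 * np.random.randn(20)
# >>> reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
# >>> reg.coef_.shape   # coef_ is flattened by fit via ravel()
# (5,)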
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
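# Illustrative usage sketch for OneClassSVM (not part of the original module;
# the training cloud and the two query points are assumptions for
# demonstration):
# >>> import numpy as np
# >>> from sklearn.svm import OneClassSVM
# >>> np.random.seed(0)
# >>> X_train = 0.3 * np.random.randn(100, 2)
# >>> oc = OneClassSVM(nu=0.1, gamma=0.1).fit(X_train)
# >>> preds = oc.predict([[0.0, 0.0], [4.0, 4.0]])  # expect +1 (inlier), -1 (outlier)
# >>> scores = oc.decision_function([[0.0, 0.0], [4.0, 4.0]])  # signed distances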
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_groupby.py | 1 | 249119 | # -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from datetime import datetime
from numpy import nan
from pandas import date_range, bdate_range, Timestamp
from pandas.core.index import Index, MultiIndex, CategoricalIndex
from pandas.core.api import Categorical, DataFrame
from pandas.core.groupby import (SpecificationError, DataError, _nargsort,
_lexsort_indexer)
from pandas.core.series import Series
from pandas.core.config import option_context
from pandas.formats.printing import pprint_thing
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_index_equal, assertRaisesRegexp)
from pandas.compat import (range, long, lrange, StringIO, lmap, lzip, map, zip,
builtins, OrderedDict, product as cart_product)
from pandas import compat
from pandas.core.panel import Panel
from pandas.tools.merge import concat
from collections import defaultdict
from functools import partial
import pandas.core.common as com
import numpy as np
import pandas.core.nanops as nanops
import pandas.util.testing as tm
import pandas as pd
from numpy.testing import assert_equal
class TestGroupBy(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_basic(self):
def checkit(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged,
check_index_type=False)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one': np.mean, 'two': np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
for dtype in ['int64', 'int32', 'float64', 'float32']:
checkit(dtype)
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.groupby('A')
self.assertRaises(KeyError, g.__getitem__, ['C']) # g[['C']]
self.assertRaises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
with assertRaisesRegexp(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.ix[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.ix[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.ix[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
self.assertTrue(com.isnull(grouped['B'].first()['foo']))
self.assertTrue(com.isnull(grouped['B'].last()['foo']))
self.assertTrue(com.isnull(grouped['B'].nth(0)['foo']))
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.ix[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
self.assertEqual(s.dtype, 'int64')
f = s.groupby(level=0).first()
self.assertEqual(f.dtype, 'int64')
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.ix[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
self.assertTrue(expected.name, 0)
self.assertEqual(expected.name, 1)
# validate first
v = s[g == 1].iloc[0]
self.assertEqual(expected.iloc[0], v)
self.assertEqual(expected2.iloc[0], v)
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
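# Illustrative note (an assumption added for clarity, not original test code):
# with sort=False the result index follows the order of first appearance in
# the grouper rather than sorted group labels, which is why
# nth(0, dropna='all') matches first() here, e.g.
# >>> s = Series([10, 20, 30, 40])
# >>> grp = Series([2, 1, 2, 1])
# >>> list(s.groupby(grp, sort=False).first().index)
# [2, 1]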
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_grouper_index_types(self):
# related GH5375
# groupby misbehaving when using a Floatlike index
df = DataFrame(np.arange(10).reshape(5, 2), columns=list('AB'))
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeIntIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
df.index = index(len(df))
df.groupby(list('abcde')).apply(lambda x: x)
df.index = list(reversed(df.index.tolist()))
df.groupby(list('abcde')).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
# GH 7885
# with level and freq specified in a pd.Grouper
from datetime import date, timedelta
d0 = date.today() - timedelta(days=14)
dates = date_range(d0, date.today())
date_index = pd.MultiIndex.from_product(
[dates, dates], names=['foo', 'bar'])
df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)
# Check string level
expected = df.reset_index().groupby([pd.Grouper(
key='foo', freq='W'), pd.Grouper(key='bar', freq='W')]).sum()
# reset index changes columns dtype to object
expected.columns = pd.Index([0], dtype='int64')
result = df.groupby([pd.Grouper(level='foo', freq='W'), pd.Grouper(
level='bar', freq='W')]).sum()
assert_frame_equal(result, expected)
# Check integer level
result = df.groupby([pd.Grouper(level=0, freq='W'), pd.Grouper(
level=1, freq='W')]).sum()
assert_frame_equal(result, expected)
def test_grouper_creation_bug(self):
# GH 8795
df = DataFrame({'A': [0, 0, 1, 1, 2, 2], 'B': [1, 2, 3, 4, 5, 6]})
g = df.groupby('A')
expected = g.sum()
g = df.groupby(pd.Grouper(key='A'))
result = g.sum()
assert_frame_equal(result, expected)
result = g.apply(lambda x: x.sum())
assert_frame_equal(result, expected)
g = df.groupby(pd.Grouper(key='A', axis=0))
result = g.sum()
assert_frame_equal(result, expected)
# GH8866
s = Series(np.arange(8, dtype='int64'),
index=pd.MultiIndex.from_product(
[list('ab'), range(2),
date_range('20130101', periods=2)],
names=['one', 'two', 'three']))
result = s.groupby(pd.Grouper(level='three', freq='M')).sum()
expected = Series([28], index=Index(
[Timestamp('2013-01-31')], freq='M', name='three'))
assert_series_equal(result, expected)
# just specifying a level breaks
result = s.groupby(pd.Grouper(level='one')).sum()
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
def test_grouper_getting_correct_binner(self):
# GH 10063
# using a non-time-based grouper and a time-based grouper
# and specifying levels
df = DataFrame({'A': 1}, index=pd.MultiIndex.from_product(
[list('ab'), date_range('20130101', periods=80)], names=['one',
'two']))
result = df.groupby([pd.Grouper(level='one'), pd.Grouper(
level='two', freq='M')]).sum()
expected = DataFrame({'A': [31, 28, 21, 31, 28, 21]},
index=MultiIndex.from_product(
[list('ab'),
date_range('20130101', freq='M', periods=3)],
names=['one', 'two']))
assert_frame_equal(result, expected)
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
def test_empty_groups(self):
# GH # 1048
self.assertRaises(ValueError, self.df.groupby, [])
def test_groupby_grouper(self):
grouped = self.df.groupby('A')
result = self.df.groupby(grouped.grouper).mean()
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_duplicated_column_errormsg(self):
# GH7511
df = DataFrame(columns=['A', 'B', 'A', 'C'],
data=[range(4), range(2, 6), range(0, 8, 2)])
self.assertRaises(ValueError, df.groupby, 'A')
self.assertRaises(ValueError, df.groupby, ['A', 'B'])
grouped = df.groupby('B')
c = grouped.count()
self.assertTrue(c.columns.nlevels == 1)
self.assertTrue(c.columns.size == 3)
def test_groupby_dict_mapping(self):
# GH #679
from pandas import Series
s = Series({'T1': 5})
result = s.groupby({'T1': 'T2'}).agg(sum)
expected = s.groupby(['T2']).agg(sum)
assert_series_equal(result, expected)
s = Series([1., 2., 3., 4.], index=list('abcd'))
mapping = {'a': 0, 'b': 0, 'c': 1, 'd': 1}
result = s.groupby(mapping).mean()
result2 = s.groupby(mapping).agg(np.mean)
expected = s.groupby([0, 0, 1, 1]).mean()
expected2 = s.groupby([0, 0, 1, 1]).mean()
assert_series_equal(result, expected)
assert_series_equal(result, result2)
assert_series_equal(result, expected2)
def test_groupby_bounds_check(self):
# groupby_X is code-generated, so if one variant
# does, the rest probably do too
a = np.array([1, 2], dtype='object')
b = np.array([1, 2, 3], dtype='object')
self.assertRaises(AssertionError, pd.algos.groupby_object, a, b)
def test_groupby_grouper_f_sanity_checked(self):
dates = date_range('01-Jan-2013', periods=12, freq='MS')
ts = Series(np.random.randn(12), index=dates)
# GH3035
# index.map is used to apply grouper to the index
# if it fails on the elements, map tries it on the entire index as
# a sequence. That can yield invalid results that cause trouble
# down the line.
# the surprise comes from using key[0:6] rather than str(key)[0:6]
# when the elements are Timestamp.
# the result is Index[0:6], very confusing.
self.assertRaises(AssertionError, ts.groupby, lambda key: key[0:6])
def test_groupby_nonobject_dtype(self):
key = self.mframe.index.labels[0]
grouped = self.mframe.groupby(key)
result = grouped.sum()
expected = self.mframe.groupby(key.astype('O')).sum()
assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = self.df_mixed_floats.copy()
df['value'] = lrange(len(df))
def max_value(group):
return group.ix[group['value'].idxmax()]
applied = df.groupby('A').apply(max_value)
result = applied.get_dtype_counts().sort_values()
expected = Series({'object': 2,
'float64': 2,
'int64': 1}).sort_values()
assert_series_equal(result, expected)
def test_groupby_return_type(self):
# GH2893, return a reduced type
df1 = DataFrame([{"val1": 1,
"val2": 20}, {"val1": 1,
"val2": 19}, {"val1": 2,
"val2": 27}, {"val1": 2,
"val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df1.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result, Series)
df2 = DataFrame([{"val1": 1,
"val2": 20}, {"val1": 1,
"val2": 19}, {"val1": 1,
"val2": 27}, {"val1": 1,
"val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df2.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=['X', 'Y'])
result = df.groupby('X', squeeze=False).count()
tm.assertIsInstance(result, DataFrame)
# GH5592
# inconsistent return type
df = DataFrame(dict(A=['Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb',
'Pony', 'Pony'], B=Series(
np.arange(7), dtype='int64'), C=date_range(
'20130101', periods=7)))
def f(grp):
return grp.iloc[0]
expected = df.groupby('A').first()[['B']]
result = df.groupby('A').apply(f)[['B']]
assert_frame_equal(result, expected)
def f(grp):
if grp.name == 'Tiger':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Tiger'] = np.nan
assert_frame_equal(result, e)
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Pony'] = np.nan
assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['C']]
e = df.groupby('A').first()[['C']]
e.loc['Pony'] = pd.NaT
assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0].loc['C']
result = df.groupby('A').apply(f)
e = df.groupby('A').first()['C'].copy()
e.loc['Pony'] = np.nan
e.name = None
assert_series_equal(result, e)
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
tm.assertIsInstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
self.assertRaises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_get_group(self):
wp = tm.makePanel()
grouped = wp.groupby(lambda x: x.month, axis='major')
gp = grouped.get_group(1)
expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1])
assert_panel_equal(gp, expected)
# GH 5267
# be datelike friendly
df = DataFrame({'DATE': pd.to_datetime(
['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', '11-Oct-2013',
'11-Oct-2013', '11-Oct-2013']),
'label': ['foo', 'foo', 'bar', 'foo', 'foo', 'bar'],
'VAL': [1, 2, 3, 4, 5, 6]})
g = df.groupby('DATE')
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group(Timestamp(key).to_datetime())
result3 = g.get_group(str(Timestamp(key)))
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
g = df.groupby(['DATE', 'label'])
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group((Timestamp(key[0]).to_datetime(), key[1]))
result3 = g.get_group((str(Timestamp(key[0])), key[1]))
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
# must pass a same-length tuple with multiple keys
self.assertRaises(ValueError, lambda: g.get_group('foo'))
self.assertRaises(ValueError, lambda: g.get_group(('foo')))
self.assertRaises(ValueError,
lambda: g.get_group(('foo', 'bar', 'baz')))
def test_get_group_grouped_by_tuple(self):
# GH 8121
df = DataFrame([[(1, ), (1, 2), (1, ), (1, 2)]], index=['ids']).T
gr = df.groupby('ids')
expected = DataFrame({'ids': [(1, ), (1, )]}, index=[0, 2])
result = gr.get_group((1, ))
assert_frame_equal(result, expected)
dt = pd.to_datetime(['2010-01-01', '2010-01-02', '2010-01-01',
'2010-01-02'])
df = DataFrame({'ids': [(x, ) for x in dt]})
gr = df.groupby('ids')
result = gr.get_group(('2010-01-01', ))
expected = DataFrame({'ids': [(dt[0], ), (dt[0], )]}, index=[0, 2])
assert_frame_equal(result, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
self.assertEqual(self.ts.dtype, np.float64)
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index(
[], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), DataFrame({}, dtype=float))
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_grouping_error_on_multidim_input(self):
from pandas.core.groupby import Grouping
self.assertRaises(ValueError,
Grouping, self.df.index, self.df[['A', 'A']])
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_apply_issues(self):
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
assert_series_equal(result, expected)
def test_time_field_bug(self):
# Test a fix for GH issue 11324: when non-key fields in a grouped
# dataframe contained time-based values that were not returned by the
# apply function, an exception would be raised.
df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
def func_with_no_date(batch):
return pd.Series({'c': 2})
def func_with_date(batch):
return pd.Series({'c': 2, 'b': datetime(2015, 1, 1)})
dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
dfg_no_conversion_expected.index.name = 'a'
dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
dfg_conversion_expected = pd.DataFrame(
{'b': datetime(2015, 1, 1),
'c': 2}, index=[1])
dfg_conversion_expected.index.name = 'a'
self.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
self.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
self.assertEqual(len(grouped), len(df))
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len(set([(x.year, x.month) for x in df.index]))
self.assertEqual(len(grouped), expected)
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
self.assertEqual(len(df.groupby(('a'))), 0)
self.assertEqual(len(df.groupby(('b'))), 3)
self.assertEqual(len(df.groupby(('a', 'b'))), 3)
def test_groups(self):
grouped = self.df.groupby(['A'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k).all())
grouped = self.df.groupby(['A', 'B'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k[0]).all())
self.assertTrue((self.df.ix[v]['B'] == k[1]).all())
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'],
['C', 'mean'], ['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var(
)], ['B', grouped['B'].std()], ['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_aggregate_item_by_item(self):
df = self.df.copy()
df['E'] = ['a'] * len(self.df)
grouped = self.df.groupby('A')
# API change in 0.11
# def aggfun(ser):
# return len(ser + 'a')
# result = grouped.agg(aggfun)
# self.assertEqual(len(result.columns), 1)
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(np.array([foo] * K), index=list('BCD'),
dtype=np.float64, name='foo')
tm.assert_series_equal(result.xs('foo'), exp)
exp = pd.Series(np.array([bar] * K), index=list('BCD'),
dtype=np.float64, name='bar')
tm.assert_almost_equal(result.xs('bar'), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(self.df.A).agg(aggfun)
tm.assertIsInstance(result, DataFrame)
self.assertEqual(len(result), 0)
def test_agg_item_by_item_raise_typeerror(self):
from numpy.random import randint
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError
self.assertRaises(TypeError, df.groupby(0).agg, raiseException)
def test_basic_regression(self):
# regression
T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
result = Series(T, lrange(0, len(T)))
groupings = np.random.random((1100, ))
groupings = Series(groupings, lrange(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.TimeGrouper('M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast(self):
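# fast cython transform ('mean' / np.mean) should match broadcasting
# each group's mean back onto the original index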
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
com._ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index)
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
def test_transform_broadcast(self):
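# transform should broadcast each group's aggregate back over the
# group's original index (Series, DataFrame, and axis=1 groupings)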
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.ts.index))
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = self.tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in self.tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
self.assertTrue(result.columns.equals(self.tsframe.columns))
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(self):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = self.tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype(self):
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug(self):
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_multiple(self):
grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(self):
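# fillna is dispatched group-wise on the groupby object; the result
# should match an explicit transform with the same filler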
df = self.tsframe[::5].reindex(self.tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(self):
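# transforming a selected subset of columns should match grouping
# that subset directly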
f = lambda x: x.mean()
result = self.df.groupby('A')['C', 'D'].transform(f)
selection = self.df[['C', 'D']]
expected = selection.groupby(self.df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(self):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = self.df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(self):
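# string aliases such as 'mean' should give the same result as
# passing the corresponding numpy function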
result = self.df.groupby('A').transform('mean')
expected = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = self.df.groupby('A')['C'].transform('mean')
expected = self.df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_transform_length(self):
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_with_na(self):
index = Index(np.arange(10))
for dtype in ['float64', 'float32', 'int64', 'int32', 'int16', 'int8']:
values = Series(np.ones(10), index, dtype=dtype)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
# self.assertTrue(issubclass(agged.dtype.type, np.integer))
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
self.assertTrue(issubclass(agged.dtype.type, np.dtype(dtype).type))
def test_groupby_transform_with_int(self):
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_indices_concatenation_order(self):
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['b', 'c'])
res = DataFrame(None, columns=['a'], index=multiindex)
return res
else:
y = y.set_index(['b', 'c'])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(['b', 'c'])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['foo', 'bar'])
res = DataFrame(None, columns=['a', 'b'], index=multiindex)
return res
else:
return y
df = DataFrame({'a': [1, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
df2 = DataFrame({'a': [3, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
# correct result
result1 = df.groupby('a').apply(f1)
result2 = df2.groupby('a').apply(f1)
assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
self.assertRaises(AssertionError, df.groupby('a').apply, f2)
self.assertRaises(AssertionError, df2.groupby('a').apply, f2)
# should fail (incorrect shape)
self.assertRaises(AssertionError, df.groupby('a').apply, f3)
self.assertRaises(AssertionError, df2.groupby('a').apply, f3)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result.unstack(), expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
self.assertRaises(AttributeError, getattr, grouped, 'foo')
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe().unstack()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe()
assert_series_equal(result, expected)
def test_series_agg_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
def test_series_agg_multi_pure_python(self):
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
assert_frame_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.ix[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
self.assertEqual(result.index.name, 'A')
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
for col in self.tsframe:
expected = grouped[col].describe()
assert_series_equal(result[col], expected, check_names=False)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
for name, group in groupedT:
assert_frame_equal(result[name], group.describe())
def test_frame_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.columns), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = self.tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.columns), 4)
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean,
check_names=False)
# iterate
for weekday, group in grouped:
self.assertEqual(group.index[0].weekday(), weekday)
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in compat.iteritems(groups):
samething = self.tsframe.index.take(indices[k])
self.assertTrue((samething == v).all())
def test_grouping_is_iterable(self):
# this code path isn't used anywhere else
# not sure it's useful
grouped = self.tsframe.groupby([lambda x: x.weekday(), lambda x: x.year
])
# test it works
for g in grouped.grouper.groupings[0]:
pass
def test_frame_groupby_columns(self):
mapping = {'A': 0, 'B': 0, 'C': 1, 'D': 1}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.columns), 2)
# transform
tf = lambda x: x - x.mean()
groupedT = self.tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
self.assertEqual(len(v.columns), 2)
def test_frame_set_name_single(self):
grouped = self.df.groupby('A')
result = grouped.mean()
self.assertEqual(result.index.name, 'A')
result = self.df.groupby('A', as_index=False).mean()
self.assertNotEqual(result.index.name, 'A')
result = grouped.agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped.agg({'C': np.mean, 'D': np.std})
self.assertEqual(result.index.name, 'A')
result = grouped['C'].mean()
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg([np.mean, np.std])
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
self.assertEqual(result.index.name, 'A')
def test_aggregate_api_consistency(self):
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean],
axis=1)
expected.columns = ['sum', 'mean']
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum,
c_mean,
d_sum,
d_mean],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum,
d_mean,
c_sum,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum,
c_mean],
axis=1)
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean,
c_sum,
d_mean,
d_sum],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
assert_frame_equal(result, expected, check_like=True)
def test_agg_compat(self):
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
result = g['D'].agg({'C': ['sum', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = ['C', 'D']
result = g['D'].agg({'C': 'sum', 'D': 'std'})
assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
def f():
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
self.assertRaises(SpecificationError, f)
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
g['D'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)
def test_multi_iter(self):
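# iterating over a multi-key groupby yields ((key1, key2), group)
# pairs, with groups in sorted key order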
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
grouped = s.groupby([k1, k2])
iterated = list(grouped)
expected = [('a', '1', s[[0, 2]]), ('a', '2', s[[1]]),
('b', '1', s[[4]]), ('b', '2', s[[3, 5]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_series_equal(three, e3)
def test_multi_iter_frame(self):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': k1, 'k2': k2},
index=['one', 'two', 'three', 'four', 'five', 'six'])
grouped = df.groupby(['k1', 'k2'])
# things get sorted!
iterated = list(grouped)
idx = df.index
expected = [('a', '1', df.ix[idx[[4]]]),
('a', '2', df.ix[idx[[3, 5]]]),
('b', '1', df.ix[idx[[0, 2]]]),
('b', '2', df.ix[idx[[1]]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_frame_equal(three, e3)
# don't iterate through groups with no data
df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
grouped = df.groupby(['k1', 'k2'])
groups = {}
for key, gp in grouped:
groups[key] = gp
self.assertEqual(len(groups), 2)
# axis = 1
three_levels = self.three_group.groupby(['A', 'B', 'C']).mean()
grouped = three_levels.T.groupby(axis=1, level=(1, 2))
for key, group in grouped:
pass
def test_multi_iter_panel(self):
wp = tm.makePanel()
grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()],
axis=1)
for (month, wd), group in grouped:
exp_axis = [x
for x in wp.major_axis
if x.month == month and x.weekday() == wd]
expected = wp.reindex(major=exp_axis)
assert_panel_equal(group, expected)
def test_multi_func(self):
col1 = self.df['A']
col2 = self.df['B']
grouped = self.df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = self.df.groupby(['A', 'B']).mean()
assert_frame_equal(agged.ix[:, ['C', 'D']], expected.ix[:, ['C', 'D']],
check_names=False) # TODO groupby get drops names
# some "groups" with no data
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(self):
grouped = self.df.groupby(['A', 'B'])['C']
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({'mean': grouped.agg(np.mean),
'std': grouped.agg(np.std)})
assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list(self):
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
grouped = data.groupby(['A', 'B'])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
grouped['F'].agg(funcs)],
keys=['D', 'E', 'F'], axis=1)
assert (isinstance(agged.index, MultiIndex))
assert (isinstance(expected.index, MultiIndex))
assert_frame_equal(agged, expected)
def test_groupby_multiple_columns(self):
data = self.df
grouped = data.groupby(['A', 'B'])
def _check_op(op):
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])
expected = dict((k, DataFrame(v))
for k, v in compat.iteritems(expected))
expected = Panel.fromDict(expected).swapaxes(0, 1)
expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
_check_op(lambda x: x.sum())
_check_op(lambda x: x.mean())
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_as_index_agg(self):
grouped = self.df.groupby('A', as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
grouped = self.df.groupby('A', as_index=True)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# multi-key
grouped = self.df.groupby(['A', 'B'], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)),
columns=['jim', 'joe', 'jolie'])
ts = Series(np.random.randint(5, 10, 50), name='jim')
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
assert_frame_equal(left, right)
def test_series_groupby_nunique(self):
from itertools import product
from string import ascii_lowercase
def check_nunique(df, keys):
for sort, dropna in product((False, True), repeat=2):
gr = df.groupby(keys, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
assert_series_equal(left, right)
days = date_range('2015-08-23', periods=10)
for n, m in product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
def test_series_groupby_value_counts(self):
from itertools import product
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
def check_value_counts(df, keys, bins):
for isort, normalize, sort, ascending, dropna \
in product((False, True), repeat=5):
kwargs = dict(normalize=normalize, sort=sort,
ascending=ascending, dropna=dropna, bins=bins)
gr = df.groupby(keys, sort=isort)
left = gr['3rd'].value_counts(**kwargs)
gr = df.groupby(keys, sort=isort)
right = gr['3rd'].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ['3rd']
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
assert_series_equal(left.sort_index(), right.sort_index())
def loop(df):
bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2)
keys = '1st', '2nd', ('1st', '2nd')
for k, b in product(keys, bins):
check_value_counts(df, k, b)
days = date_range('2015-08-24', periods=10)
for n, m in product((100, 1000), (5, 20)):
frame = DataFrame({
'1st': np.random.choice(
list('abcd'), n),
'2nd': np.random.choice(days, n),
'3rd': np.random.randint(1, m + 1, n)
})
loop(frame)
frame.loc[1::11, '1st'] = nan
frame.loc[3::17, '2nd'] = nan
frame.loc[7::19, '3rd'] = nan
frame.loc[8::19, '3rd'] = nan
frame.loc[9::19, '3rd'] = nan
loop(frame)
def test_multiindex_passthru(self):
# GH 7997
# regression from 0.14.1
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
df.columns = pd.MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])
result = df.groupby(axis=1, level=[0, 1]).first()
assert_frame_equal(result, df)
def test_multifunc_select_col_integer_cols(self):
df = self.df
df.columns = np.arange(len(df.columns))
# it works!
df.groupby(1, as_index=False)[2].agg({'Q': np.mean})
def test_as_index_series_return_frame(self):
grouped = self.df.groupby('A', as_index=False)
grouped2 = self.df.groupby(['A', 'B'], as_index=False)
result = grouped['C'].agg(np.sum)
expected = grouped.agg(np.sum).ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].agg(np.sum)
expected2 = grouped2.agg(np.sum).ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
result = grouped['C'].sum()
expected = grouped.sum().ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].sum()
expected2 = grouped2.sum().ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
# corner case
self.assertRaises(Exception, grouped['C'].__getitem__, 'D')
def test_groupby_as_index_cython(self):
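# with as_index=False the cython aggregation returns the group keys
# as regular columns and a plain integer index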
data = self.df
# single-key
grouped = data.groupby('A', as_index=False)
result = grouped.mean()
expected = data.groupby(['A']).mean()
expected.insert(0, 'A', expected.index)
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(['A', 'B'], as_index=False)
result = grouped.mean()
expected = data.groupby(['A', 'B']).mean()
arrays = lzip(*expected.index._tuple_index)
expected.insert(0, 'A', arrays[0])
expected.insert(1, 'B', arrays[1])
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(self):
grouped = self.df.groupby(['A', 'B'], as_index=False)
# GH #421
result = grouped['C'].agg(len)
expected = grouped.agg(len).ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_groupby_as_index_corner(self):
self.assertRaises(TypeError, self.ts.groupby, lambda x: x.weekday(),
as_index=False)
self.assertRaises(ValueError, self.df.groupby, lambda x: x.lower(),
as_index=False, axis=1)
def test_groupby_as_index_apply(self):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
assert_index_equal(res_as, exp)
assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610: as_index=False now returns a MultiIndex here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
assert_index_equal(res_as_apply, exp_as_apply)
assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
assert_index_equal(res, ind)
def test_groupby_head_tail(self):
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
g_as = df.groupby('A', as_index=True)
g_not_as = df.groupby('A', as_index=False)
# as_index=False, much easier
assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
empty_not_as = DataFrame(columns=df.columns, index=pd.Index(
[], dtype=df.index.dtype))
empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_not_as, g_not_as.head(0))
assert_frame_equal(empty_not_as, g_not_as.tail(0))
assert_frame_equal(empty_not_as, g_not_as.head(-1))
assert_frame_equal(empty_not_as, g_not_as.tail(-1))
assert_frame_equal(df, g_not_as.head(7)) # contains all
assert_frame_equal(df, g_not_as.tail(7))
# as_index=True (used to be different)
df_as = df
assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_as, g_as.head(0))
assert_frame_equal(empty_as, g_as.tail(0))
assert_frame_equal(empty_as, g_as.head(-1))
assert_frame_equal(empty_as, g_as.tail(-1))
assert_frame_equal(df_as, g_as.head(7)) # contains all
assert_frame_equal(df_as, g_as.tail(7))
# test with selection
assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
def test_groupby_multiple_key(self):
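# grouping by year/month/day functions puts each row in its own
# group, so the aggregate should reproduce the original values
# (also along axis=1 on the transpose)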
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum())
self.assertTrue(agged.index.equals(df.columns))
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(self):
# test that having an all-NA column doesn't mess you up
df = self.df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = self.df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(self):
grouped = self.df.groupby('A')
result = grouped.mean()
expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
df = self.df.ix[:, ['A', 'C', 'D']]
df['E'] = datetime.now()
grouped = df.groupby('A')
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
result = self.assertRaises(TypeError, grouped.agg,
lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(self):
grouped = self.three_group.groupby(['A', 'B'])
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
def test_empty_groups_corner(self):
# handle empty groups
df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2']),
'k3': ['foo', 'bar'] * 3,
'v1': np.random.randn(6),
'v2': np.random.randn(6)})
grouped = df.groupby(['k1', 'k2'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
grouped = self.mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped['A'].apply(np.mean)
assert_series_equal(agged['A'], agged_A)
self.assertEqual(agged.index.name, 'first')
def test_apply_concat_preserve_names(self):
grouped = self.three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
self.assertEqual(result.index.names, ('A', 'B', 'stat'))
result2 = grouped.apply(desc2)
self.assertEqual(result2.index.names, ('A', 'B', 'stat'))
result3 = grouped.apply(desc3)
self.assertEqual(result3.index.names, ('A', 'B', None))
def test_nonsense_func(self):
df = DataFrame([0])
self.assertRaises(Exception, df.groupby, lambda x: x + 'foo')
def test_builtins_apply(self): # GH8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=['jim', 'joe'])
df['jolie'] = np.random.randn(1000)
for keys in ['jim', ['jim', 'joe']]: # single key & multi-key
if keys == 'jim':
continue
for f in [max, min, sum]:
fname = f.__name__
result = df.groupby(keys).apply(f)
result.shape
ngroups = len(df.drop_duplicates(subset=keys))
assert result.shape == (ngroups, 3), 'invalid frame shape: '\
'{} (expected ({}, 3))'.format(result.shape, ngroups)
assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
assert_frame_equal(result, expected, check_dtype=False)
assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_cythonized_aggers(self):
data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
'B': ['A', 'B'] * 6,
'C': np.random.randn(12)}
df = DataFrame(data)
df.loc[2:10:2, 'C'] = nan
def _testit(name):
op = lambda x: getattr(x, name)()
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
for cat, group in grouped:
exp[cat] = op(group['C'])
exp = DataFrame({'C': exp})
exp.index.name = 'A'
result = op(grouped)
assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(['A', 'B'])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group['C'])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ['A', 'B']
exp.name = 'C'
result = op(grouped)['C']
if not tm._incompat_bottleneck_version(name):
assert_series_equal(result, exp)
_testit('count')
_testit('sum')
_testit('std')
_testit('var')
_testit('sem')
_testit('mean')
_testit('median')
_testit('prod')
_testit('min')
_testit('max')
def test_max_min_non_numeric(self):
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
self.assertTrue('ss' in result)
result = aa.groupby('nn').min()
self.assertTrue('ss' in result)
def test_cython_agg_boolean(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': np.random.randint(0, 2, 50).astype('bool')})
result = frame.groupby('a')['b'].mean()
expected = frame.groupby('a')['b'].agg(np.mean)
assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame.groupby('a')['b'].mean)
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame[['b']].groupby(frame['a']).mean)
def test_cython_agg_nothing_to_agg_with_dates(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25,
'dates': pd.date_range('now', periods=50,
freq='T')})
with tm.assertRaisesRegexp(DataError, "No numeric types to aggregate"):
frame.groupby('b').dates.mean()
def test_groupby_timedelta_cython_count(self):
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_cython_agg_frame_columns(self):
# #2113
df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
def test_wrap_aggregated_output_multindex(self):
df = self.mframe.T
df['baz', 'two'] = 'peekaboo'
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
tm.assertIsInstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ('foo', 'one'):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
self.assertEqual(len(agged2.columns) + 1, len(df.columns))
def test_groupby_level(self):
frame = self.mframe
deleveled = frame.reset_index()
result0 = frame.groupby(level=0).sum()
result1 = frame.groupby(level=1).sum()
expected0 = frame.groupby(deleveled['first'].values).sum()
expected1 = frame.groupby(deleveled['second'].values).sum()
expected0 = expected0.reindex(frame.index.levels[0])
expected1 = expected1.reindex(frame.index.levels[1])
self.assertEqual(result0.index.name, 'first')
self.assertEqual(result1.index.name, 'second')
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
self.assertEqual(result0.index.name, frame.index.names[0])
self.assertEqual(result1.index.name, frame.index.names[1])
# groupby level name
result0 = frame.groupby(level='first').sum()
result1 = frame.groupby(level='second').sum()
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
# axis=1
result0 = frame.T.groupby(level=0, axis=1).sum()
result1 = frame.T.groupby(level=1, axis=1).sum()
assert_frame_equal(result0, expected0.T)
assert_frame_equal(result1, expected1.T)
# raise exception for non-MultiIndex
self.assertRaises(ValueError, self.df.groupby, level=1)
def test_groupby_level_index_names(self):
# GH4014 this used to raise ValueError since 'exp'>1 (in py2)
df = DataFrame({'exp': ['A'] * 3 + ['B'] * 3,
'var1': lrange(6), }).set_index('exp')
df.groupby(level='exp')
self.assertRaises(ValueError, df.groupby, level='foo')
def test_groupby_level_with_nas(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1,
2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([22., 6.], index=[1, 0])
assert_series_equal(result, expected)
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([18., 6.], index=[1, 0])
assert_series_equal(result, expected)
def test_groupby_level_apply(self):
frame = self.mframe
result = frame.groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
result = frame.groupby(level=1).count()
self.assertEqual(result.index.name, 'second')
result = frame['A'].groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
def test_groupby_args(self):
# PR8618 and issue 8015
frame = self.mframe
def j():
frame.groupby()
self.assertRaisesRegexp(TypeError,
"You have to supply one of 'by' and 'level'",
j)
def k():
frame.groupby(by=None, level=None)
self.assertRaisesRegexp(TypeError,
"You have to supply one of 'by' and 'level'",
k)
def test_groupby_level_mapper(self):
frame = self.mframe
deleveled = frame.reset_index()
mapper0 = {'foo': 0, 'bar': 0, 'baz': 1, 'qux': 1}
mapper1 = {'one': 0, 'two': 0, 'three': 1}
result0 = frame.groupby(mapper0, level=0).sum()
result1 = frame.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
expected0 = frame.groupby(mapped_level0).sum()
expected1 = frame.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = 'first', 'second'
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
def test_groupby_level_0_nonmulti(self):
# #1313
a = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1,
4, 5, 2, 6], name='foo'))
result = a.groupby(level=0).sum()
self.assertEqual(result.index.name, a.index.name)
def test_groupby_complex(self):
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
assert_series_equal(result, expected)
result = a.sum(level=0)
assert_series_equal(result, expected)
def test_level_preserve_order(self):
grouped = self.mframe.groupby(level=0)
exp_labels = np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_grouping_labels(self):
grouped = self.mframe.groupby(self.mframe.index.get_level_values(0))
exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_cython_fail_agg(self):
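# summing string values falls back from the cython aggregation path;
# the result should match an explicit grouped.agg(np.sum)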
dr = bdate_range('1/1/2000', periods=50)
ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
assert_series_equal(summed, expected)
def test_apply_series_to_frame(self):
def f(piece):
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': np.log(piece)})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
tm.assertIsInstance(result, DataFrame)
self.assertTrue(result.index.equals(ts.index))
def test_apply_series_yield_constant(self):
result = self.df.groupby(['A', 'B'])['C'].apply(len)
self.assertEqual(result.index.names[:2], ('A', 'B'))
def test_apply_frame_to_series(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
self.assertTrue(result.index.equals(expected.index))
self.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series(self):
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
assert_series_equal(result, exp, check_names=False)
self.assertEqual(result.name, 'C')
def test_apply_transform(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(result, expected)
def test_apply_multikey_corner(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
assert_frame_equal(result.ix[key], f(group))
def test_mutate_groups(self):
# GH3380
mydf = DataFrame({
'cat1': ['a'] * 8 + ['b'] * 6,
'cat2': ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 +
['d'] * 2 + ['e'] * 2,
'cat3': lmap(lambda x: 'g%s' % x, lrange(1, 15)),
'val': np.random.randint(100, size=14),
})
def f_copy(x):
x = x.copy()
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
def f_no_copy(x):
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
grpby_copy = mydf.groupby('cat1').apply(f_copy)
grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy)
assert_series_equal(grpby_copy, grpby_no_copy)
def test_no_mutate_but_looks_like(self):
# GH 8467
# the first shows the mutation indicator
# second does not, but should yield the same results
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'value': range(9)})
result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
assert_series_equal(result1, result2)
def test_apply_chunk_view(self):
# low level tinkering (returning a view of the chunk) could be unsafe; make sure it is not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict(self):
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_groupby_series_indexed_differently(self):
s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
assert_series_equal(agged, exp)
def test_groupby_with_hier_columns(self):
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux',
'qux'], ['one', 'two', 'one', 'two', 'one', 'two',
'one', 'two']]))
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), (
'B', 'cat'), ('A', 'dog')])
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).mean()
self.assertTrue(result.index.equals(df.index))
result = df.groupby(level=0).agg(np.mean)
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0).apply(lambda x: x.mean())
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
self.assertTrue(result.columns.equals(Index(['A', 'B'])))
self.assertTrue(result.index.equals(df.index))
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df['A', 'foo'] = 'bar'
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(df.columns[:-1]))
def test_pass_args_kwargs(self):
from numpy import percentile
def f(x, q=None, axis=0):
return percentile(x, q, axis=axis)
g = lambda x: percentile(x, 80, axis=0)
# Series
ts_grouped = self.ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(percentile, 80, axis=0)
apply_result = ts_grouped.apply(percentile, 80, axis=0)
trans_result = ts_grouped.transform(percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(.8)
trans_expected = ts_grouped.transform(g)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = self.tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, .8)
expected = df_grouped.quantile(.8)
assert_frame_equal(apply_result, expected)
assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
assert_frame_equal(agg_result, expected)
assert_frame_equal(apply_result, expected)
def test_size(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('A')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('B')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame([], columns=['A', 'B'])
out = Series([], dtype='int64', index=Index([], name='A'))
assert_series_equal(df.groupby('A').size(), out)
def test_count(self):
from string import ascii_lowercase
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
assert_frame_equal(count_not_as, expected.reset_index())
assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
assert_series_equal(count_B, expected['B'])
def test_count_object(self):
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type(self): # GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_non_cython_api(self):
# GH5610
# non-cython calls should not include the grouper
df = DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, 'baz']],
               columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
assert_frame_equal(result, expected)
# describe
expected = DataFrame(dict(B=concat(
[df.loc[[0, 1], 'B'].describe(), df.loc[[2], 'B'].describe()],
keys=[1, 3])))
expected.index.names = ['A', None]
result = g.describe()
assert_frame_equal(result, expected)
expected = concat(
[df.loc[[0, 1], ['A', 'B']].describe(),
df.loc[[2], ['A', 'B']].describe()], keys=[0, 1])
result = gni.describe()
assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0], [nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
assert_frame_equal(result, expected)
def test_cython_api2(self):
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
assert_frame_equal(result, expected)
def test_grouping_ndarray(self):
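# grouping by a raw ndarray of values should match grouping by the column name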
grouped = self.df.groupby(self.df['A'].values)
result = grouped.sum()
expected = self.df.groupby('A').sum()
# Note: no names when grouping by value
assert_frame_equal(result, expected, check_names=False)
def test_agg_consistency(self):
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except:
return np.nan
import datetime as dt
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
assert_frame_equal(result, expected)
def test_apply_typecast_fail(self):
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_multiindex_fail(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_corner(self):
result = self.tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = self.tsframe * 2
assert_frame_equal(result, expected)
def test_apply_without_copy(self):
# GH 5545
# returning a non-copy in an applied function fails
data = DataFrame({'id_field': [100, 100, 200, 300],
'category': ['a', 'b', 'c', 'c'],
'value': [1, 2, 3, 4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
assert_frame_equal(result, expected)
def test_apply_use_categorical_name(self):
from pandas import qcut
cats = qcut(self.df.C, 4)
def get_stats(group):
return {'min': group.min(),
'max': group.max(),
'count': group.count(),
'mean': group.mean()}
result = self.df.groupby(cats).D.apply(get_stats)
self.assertEqual(result.index.names[0], 'C')
def test_apply_categorical_data(self):
# GH 10138
for ordered in [True, False]:
dense = Categorical(list('abc'), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(
list('aaa'), categories=['a', 'b'], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({'missing': missing,
'dense': dense,
'values': values})
grouped = df.groupby(['missing', 'dense'])
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_product([['a', 'b'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
index=idx,
columns=['values'])
assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
assert_frame_equal(grouped.mean(), expected)
assert_frame_equal(grouped.agg(np.mean), expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = Series(1, index=idx)
assert_series_equal(grouped.apply(lambda x: 1), expected)
def test_apply_corner_cases(self):
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
self.assertTrue('value3' in result)
def test_transform_mixed_type(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
self.assertEqual(result['d'].dtype, np.float64)
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.ix[key])
def test_groupby_wrong_multi_labels(self):
from pandas import read_csv
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_series_with_name(self):
result = self.df.groupby(self.df['A']).mean()
result2 = self.df.groupby(self.df['A'], as_index=False).mean()
self.assertEqual(result.index.name, 'A')
self.assertIn('A', result2)
result = self.df.groupby([self.df['A'], self.df['B']]).mean()
result2 = self.df.groupby([self.df['A'], self.df['B']],
as_index=False).mean()
self.assertEqual(result.index.names, ('A', 'B'))
self.assertIn('A', result2)
self.assertIn('B', result2)
def test_seriesgroupby_name_attr(self):
# GH 6265
result = self.df.groupby('A')['C']
self.assertEqual(result.count().name, 'C')
self.assertEqual(result.mean().name, 'C')
testFunc = lambda x: np.sum(x) * 2
self.assertEqual(result.agg(testFunc).name, 'C')
def test_consistency_name(self):
# GH 12363
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
expected = df.groupby(['A']).B.count()
result = df.B.groupby(df.A).count()
assert_series_equal(result, expected)
def test_groupby_name_propagation(self):
# GH 6124
def summarize(df, name=None):
return Series({'count': 1, 'mean': 2, 'omissions': 3, }, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({
'count': 1,
'mean': 2,
'omissions': 3,
}, name=df.iloc[0]['A'])
metrics = self.df.groupby('A').apply(summarize)
self.assertEqual(metrics.columns.name, None)
metrics = self.df.groupby('A').apply(summarize, 'metrics')
self.assertEqual(metrics.columns.name, 'metrics')
metrics = self.df.groupby('A').apply(summarize_random_name)
self.assertEqual(metrics.columns.name, None)
def test_groupby_nonstring_columns(self):
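# integer (non-string) column labels should be usable as group keys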
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
assert_frame_equal(result, expected)
def test_cython_grouper_series_bug_noncontig(self):
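# median over a non-contiguous, all-NaN series should stay all-NaN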
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0], index=lrange(100))
inds = np.tile(lrange(10), 10)
result = obj.groupby(inds).agg(Series.median)
self.assertTrue(result.isnull().all())
def test_series_grouper_noncontig_index(self):
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements used to cause a segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone(self):
from decimal import Decimal
s = Series(lrange(5))
labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# x should be a view here, so its base is non-empty
assert (len(x.base) > 0)
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
def test_fast_apply(self):
# make sure the fast apply path is taken rather than raising an error;
# otherwise the slower python path would be used
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
self.assertFalse(mutated)
def test_apply_with_mixed_dtype(self):
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': ['one', 'two', 'two', 'three', 'one', 'two'],
'foo2': np.random.randn(6)})
result = df.apply(lambda x: x, axis=1)
assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
assert_series_equal(result1, result2)
def test_groupby_aggregation_mixed_dtype(self):
# GH 6212
expected = DataFrame({
'v1': [5, 5, 7, np.nan, 3, 3, 4, 1],
'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]},
index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99),
('big', 'damp'),
('blue', 'dry'),
('red', 'red'), ('red', 'wet')],
names=['by1', 'by2']))
df = DataFrame({
'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan,
12],
'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99,
np.nan, np.nan]
})
g = df.groupby(['by1', 'by2'])
result = g[['v1', 'v2']].mean()
assert_frame_equal(result, expected)
def test_groupby_dtype_inference_empty(self):
# GH 6733
df = DataFrame({'x': [], 'range': np.arange(0, dtype='int64')})
self.assertEqual(df['x'].dtype, np.float64)
result = df.groupby('x').first()
exp_index = Index([], name='x', dtype=np.float64)
expected = DataFrame({'range': Series(
[], index=exp_index, dtype='int64')})
assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(self):
result = self.df.groupby(list(self.df['A'])).mean()
expected = self.df.groupby(self.df['A']).mean()
assert_frame_equal(result, expected, check_names=False)
self.assertRaises(Exception, self.df.groupby, list(self.df['A'][:-1]))
# pathological case of ambiguity
df = DataFrame({'foo': [0, 1],
'bar': [3, 4],
'val': np.random.randn(2)})
result = df.groupby(['foo', 'bar']).mean()
expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
def test_groupby_keys_same_size_as_index(self):
# GH 11185
freq = 's'
index = pd.date_range(start=pd.Timestamp('2015-09-29T11:34:44-0700'),
periods=2, freq=freq)
df = pd.DataFrame([['A', 10], ['B', 15]], columns=[
'metric', 'values'
], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), 'metric']).mean()
expected = df.set_index([df.index, 'metric'])
assert_frame_equal(result, expected)
def test_groupby_one_row(self):
# GH 11741
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD'))
self.assertRaises(KeyError, df1.groupby, 'Z')
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD'))
self.assertRaises(KeyError, df2.groupby, 'Z')
def test_groupby_nat_exclude(self):
# GH 6992
df = pd.DataFrame(
{'values': np.random.randn(8),
'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp(
'2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan,
pd.Timestamp('2013-01-01')],
'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
grouped = df.groupby('dt')
expected = [[1, 7], [3, 5]]
keys = sorted(grouped.groups.keys())
self.assertEqual(len(keys), 2)
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# to avoid tz effects, only compare the values
self.assertEqual(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
self.assertEqual(grouped.ngroups, 2)
expected = {Timestamp('2013-01-01 00:00:00'): np.array([1, 7]),
Timestamp('2013-02-01 00:00:00'): np.array([3, 5])}
for k in grouped.indices:
self.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
'nat': [pd.NaT, pd.NaT, pd.NaT]})
self.assertEqual(nan_df['nan'].dtype, 'float64')
self.assertEqual(nan_df['nat'].dtype, 'datetime64[ns]')
for key in ['nan', 'nat']:
grouped = nan_df.groupby(key)
self.assertEqual(grouped.groups, {})
self.assertEqual(grouped.ngroups, 0)
self.assertEqual(grouped.indices, {})
self.assertRaises(KeyError, grouped.get_group, np.nan)
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
def test_dictify(self):
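# smoke test: groupby objects should be iterable and convertible to dicts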
dict(iter(self.df.groupby('A')))
dict(iter(self.df.groupby(['A', 'B'])))
dict(iter(self.df['C'].groupby(self.df['A'])))
dict(iter(self.df['C'].groupby([self.df['A'], self.df['B']])))
dict(iter(self.df.groupby('A')['C']))
dict(iter(self.df.groupby(['A', 'B'])['C']))
def test_sparse_friendly(self):
sdf = self.df[['C', 'D']].to_sparse()
panel = tm.makePanel()
tm.add_nans(panel)
def _check_work(gp):
gp.mean()
gp.agg(np.mean)
dict(iter(gp))
# it works!
_check_work(sdf.groupby(lambda x: x // 2))
_check_work(sdf['C'].groupby(lambda x: x // 2))
_check_work(sdf.groupby(self.df['A']))
# do this someday
# _check_work(panel.groupby(lambda x: x.month, axis=1))
def test_panel_groupby(self):
self.panel = tm.makePanel()
tm.add_nans(self.panel)
grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
axis='items')
agged = grouped.mean()
agged2 = grouped.agg(lambda x: x.mean('items'))
tm.assert_panel_equal(agged, agged2)
self.assert_numpy_array_equal(agged.items, [0, 1])
grouped = self.panel.groupby(lambda x: x.month, axis='major')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.major_axis, sorted(list(set(
self.panel.major_axis.month))))
grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis='minor')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.minor_axis, [0, 1])
def test_numpy_groupby(self):
from pandas.core.groupby import numpy_groupby
data = np.random.randn(100, 100)
labels = np.random.randint(0, 10, size=100)
df = DataFrame(data)
result = df.groupby(labels).sum().values
expected = numpy_groupby(data, labels)
assert_almost_equal(result, expected)
result = df.groupby(labels, axis=1).sum().values
expected = numpy_groupby(data, labels, axis=1)
assert_almost_equal(result, expected)
def test_groupby_2d_malformed(self):
d = DataFrame(index=lrange(2))
d['group'] = ['g1', 'g2']
d['zeros'] = [0, 0]
d['ones'] = [1, 1]
d['label'] = ['l1', 'l2']
tmp = d.groupby(['group']).mean()
res_values = np.array([[0., 1.], [0., 1.]])
self.assert_numpy_array_equal(tmp.columns, ['zeros', 'ones'])
self.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow(self):
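# multi-key grouping on 25000 rows; both key orders must yield the same number of groups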
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)
))
A = np.arange(25000)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': np.random.randn(25000)})
left = df.groupby(['A', 'B', 'C', 'D']).sum()
right = df.groupby(['D', 'C', 'B', 'A']).sum()
self.assertEqual(len(left), len(right))
def test_int64_overflow(self):
from pandas.core.groupby import _int64_overflow_possible
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel(0)
self.assertTrue(left.index.equals(exp_index))
exp_index, _ = right.index.sortlevel(0)
self.assertTrue(right.index.equals(exp_index))
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com._asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
self.assertEqual(left[k], right[k[::-1]])
self.assertEqual(left[k], v)
self.assertEqual(len(left), len(right))
# GH9096
values = range(55109)
data = pd.DataFrame.from_dict({'a': values,
'b': values,
'c': values,
'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
self.assertEqual(len(grouped), len(values))
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
self.assertTrue(_int64_overflow_possible(gr.grouper.shape))
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
self.assertEqual(len(gr), len(jim))
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_groupby_sort_multi(self):
df = DataFrame({'a': ['foo', 'bar', 'baz'],
'b': [3, 2, 1],
'c': [0, 1, 2],
'd': np.random.randn(3)})
tups = lmap(tuple, df[['a', 'b', 'c']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['a', 'b', 'c'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = lmap(tuple, df[['c', 'a', 'b']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['c', 'a', 'b'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups)
tups = lmap(tuple, df[['b', 'c', 'a']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['b', 'c', 'a'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
'b': [0, 0, 0, 1, 1, 1],
'd': np.random.randn(6)})
grouped = df.groupby(['a', 'b'])['d']
result = grouped.sum()
_check_groupby(df, result, ['a', 'b'], 'd')
def test_intercept_builtin_sum(self):
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_column_select_via_attr(self):
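# attribute access selects a column; a column named 'mean' must not shadow the .mean() method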
result = self.df.groupby('A').C.sum()
expected = self.df.groupby('A')['C'].sum()
assert_series_equal(result, expected)
self.df['mean'] = 1.5
result = self.df.groupby('A').mean()
expected = self.df.groupby('A').agg(np.mean)
assert_frame_equal(result, expected)
def test_rank_apply(self):
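# groupwise rank (including pct=True) should match concatenating per-group ranks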
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame({'value': np.random.randn(500),
'key1': lev1.take(lab1),
'key2': lev2.take(lab2)})
result = df.groupby(['key1', 'key2']).value.rank()
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank())
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected)
result = df.groupby(['key1', 'key2']).value.rank(pct=True)
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank(pct=True))
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected)
def test_dont_clobber_name_column(self):
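# an identity apply should preserve a column literally named 'name'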
df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
'name': ['foo', 'bar', 'baz'] * 2})
result = df.groupby('key').apply(lambda x: x)
assert_frame_equal(result, df)
def test_skip_group_keys(self):
from pandas import concat
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values(by='A')[:3])
expected = concat(pieces)
assert_frame_equal(result, expected)
grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values()[:3])
expected = concat(pieces)
assert_series_equal(result, expected)
def test_no_nonsense_name(self):
# GH #995
s = self.frame['C'].copy()
s.name = None
result = s.groupby(self.frame['A']).agg(np.sum)
self.assertIsNone(result.name)
def test_wrap_agg_out(self):
grouped = self.three_group.groupby(['A', 'B'])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = self.three_group.ix[:, self.three_group.columns != 'C']
expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
assert_frame_equal(result, expected)
def test_multifunc_sum_bug(self):
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x['test'] = 0
x['fl'] = [1.3, 1.5, 1.6]
grouped = x.groupby('test')
result = grouped.agg({'fl': 'sum', 2: 'size'})
self.assertEqual(result['fl'].dtype, np.float64)
def test_handle_dict_return_value(self):
def f(group):
return {'min': group.min(), 'max': group.max()}
def g(group):
return Series({'min': group.min(), 'max': group.max()})
result = self.df.groupby('A')['C'].apply(f)
expected = self.df.groupby('A')['C'].apply(g)
tm.assertIsInstance(result, Series)
assert_series_equal(result, expected)
def test_getitem_list_of_columns(self):
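# selecting columns with a list, a tuple, or an Index slice should give the same result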
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': np.random.randn(8)})
result = df.groupby('A')[['C', 'D']].mean()
result2 = df.groupby('A')['C', 'D'].mean()
result3 = df.groupby('A')[df.columns[2:4]].mean()
expected = df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
def test_agg_multiple_functions_maintain_order(self):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = self.df.groupby('A')['C'].agg(funcs)
exp_cols = ['mean', 'max', 'min']
self.assert_numpy_array_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(self):
# #1359
funcs = [('foo', 'mean'), 'std']
ex_funcs = [('foo', 'mean'), ('std', 'std')]
result = self.df.groupby('A')['C'].agg(funcs)
expected = self.df.groupby('A')['C'].agg(ex_funcs)
assert_frame_equal(result, expected)
result = self.df.groupby('A').agg(funcs)
expected = self.df.groupby('A').agg(ex_funcs)
assert_frame_equal(result, expected)
def test_agg_multiple_functions_too_many_lambdas(self):
grouped = self.df.groupby('A')
funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]
self.assertRaises(SpecificationError, grouped.agg, funcs)
def test_more_flexible_frame_multi_function(self):
from pandas import concat
grouped = self.df.groupby('A')
exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))
exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))
expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sortlevel(0, axis=1)
d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])
result = grouped.aggregate(d)
assert_frame_equal(result, expected)
# be careful
result = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
expected = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
d = OrderedDict([['C', np.mean], ['D', OrderedDict(
[['foo', np.mean], ['bar', np.std]])]])
result = grouped.aggregate(d)
d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
expected = grouped.aggregate(d)
assert_frame_equal(result, expected)
def test_multi_function_flexible_mix(self):
# GH #1268
grouped = self.df.groupby('A')
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', 'sum']])
result = grouped.aggregate(d)
d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', ['sum']]])
result2 = grouped.aggregate(d2)
d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', {'sum': 'sum'}]])
expected = grouped.aggregate(d3)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_agg_callables(self):
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum, np.sum, lambda x: sum(x), lambda x: x.sum(),
partial(sum), fn_class()]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
assert_frame_equal(result, expected)
def test_set_group_name(self):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
def _check_all(grouped):
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({'C': freduce, 'D': freduce})
grouped.transform(f)
grouped['C'].apply(f)
grouped['C'].aggregate(freduce)
grouped['C'].aggregate([freduce, foo])
grouped['C'].transform(f)
_check_all(self.df.groupby('A'))
_check_all(self.df.groupby(['A', 'B']))
def test_no_dummy_key_names(self):
# GH #1291
result = self.df.groupby(self.df['A'].values).sum()
self.assertIsNone(result.index.name)
result = self.df.groupby([self.df['A'].values, self.df['B'].values
]).sum()
self.assertEqual(result.index.names, (None, None))
def test_groupby_sort_categorical(self):
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'], ordered=True)
index = CategoricalIndex(
['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range')
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
col = 'range'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
df['range'] = Categorical(df['range'], ordered=False)
index = CategoricalIndex(
['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range')
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]',
'(5, 7.5]', '(0, 2.5]'],
name='range')
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
index=index, columns=['foo', 'bar'])
col = 'range'
# this is an unordered categorical, but we allow this ####
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_categorical_datetimelike(self):
# GH10505
# use the same data as test_groupby_sort_categorical, where the
# categories correspond to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt')
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_multiindex_series(self):
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=['a', 'b'])
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 1], [1, 0, 0]], names=['a', 'b'])
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=['a', 'b'], sort=False).first()
assert_series_equal(result, mseries_result)
result = mseries.groupby(level=['a', 'b'], sort=True).first()
assert_series_equal(result, mseries_result.sort_index())
def test_groupby_categorical(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
exp_idx = CategoricalIndex(levels, ordered=True)
expected = expected.reindex(exp_idx)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(
Categorical(ord_labels), sort=False).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index,
categories=expected.index,
ordered=True)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0),
expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_categorical_index(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(
np.repeat(
np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
df['cats'] = cats
# with a cat index
result = df.set_index('cats').groupby(level=0).sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby('cats').sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
def test_groupby_describe_categorical_columns(self):
# GH 11558
cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
categories=['foo', 'bar', 'baz', 'qux'],
ordered=True)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.columns, cats)
tm.assert_categorical_equal(result.columns.values, cats.values)
def test_groupby_unstack_categorical(self):
# GH11558 (example is taken from the original issue)
df = pd.DataFrame({'a': range(10),
'medium': ['A', 'B'] * 5,
'artist': list('XYXXY') * 2})
df['medium'] = df['medium'].astype('category')
gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()
result = gcat.describe()
exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
name='medium')
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat['A'] + gcat['B']
expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
tm.assert_series_equal(result, expected)
def test_groupby_groups_datetimeindex(self):
# #1430
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(periods),
'low': np.arange(periods)}, index=ind)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
# it works!
groups = grouped.groups
tm.assertIsInstance(list(groups.keys())[0], datetime)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': dates,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['datetime'] = df['datetime'].apply(
lambda d: Timestamp(d, tz='US/Pacific'))
exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='datetime')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['datetime', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='Asia/Tokyo')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_multi_timezone(self):
# combining multiple / different timezones yields UTC
data = """0,2000-01-28 16:47:00,America/Chicago
1,2000-01-29 16:48:00,America/Chicago
2,2000-01-30 16:49:00,America/Los_Angeles
3,2000-01-31 16:50:00,America/Chicago
4,2000-01-01 16:50:00,America/New_York"""
df = pd.read_csv(StringIO(data), header=None,
names=['value', 'date', 'tz'])
result = df.groupby('tz').date.apply(
lambda x: pd.to_datetime(x).dt.tz_localize(x.name))
expected = Series([Timestamp('2000-01-28 16:47:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-29 16:48:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-30 16:49:00-0800',
tz='America/Los_Angeles'),
Timestamp('2000-01-31 16:50:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-01 16:50:00-0500',
tz='America/New_York')],
name='date',
dtype=object)
assert_series_equal(result, expected)
tz = 'America/Chicago'
res_values = df.groupby('tz').date.get_group(tz)
result = pd.to_datetime(res_values).dt.tz_localize(tz)
exp_values = Series(['2000-01-28 16:47:00', '2000-01-29 16:48:00',
'2000-01-31 16:50:00'],
index=[0, 1, 3], name='date')
expected = pd.to_datetime(exp_values).dt.tz_localize(tz)
assert_series_equal(result, expected)
def test_groupby_groups_periods(self):
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'period': [pd.Period(d, freq='H') for d in dates],
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
exp_idx1 = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
freq='H', name='period')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['period', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.PeriodIndex(dates, freq='H')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], freq='H')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_reindex_inside_function(self):
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(
periods), 'low': np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.select(lambda x: x.hour < 11).dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({'high': agg_before(11, np.max)})
closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
assert_frame_equal(closure_bad, closure_good)
def test_multiindex_columns_empty_level(self):
l = [['count', 'values'], ['to filter', '']]
midx = MultiIndex.from_tuples(l)
df = DataFrame([[long(1), 'A']], columns=midx)
grouped = df.groupby('to filter').groups
self.assert_numpy_array_equal(grouped['A'], [0])
grouped = df.groupby([('to filter', '')]).groups
self.assert_numpy_array_equal(grouped['A'], [0])
df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
def test_cython_median(self):
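# the cython median should match nanmedian/np.median aggregation, with NaNs in values and labels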
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
assert_frame_equal(rs, xp)
def test_groupby_categorical_no_compress(self):
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean()
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean().reindex(cats.categories)
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b").mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
self.assert_numpy_array_equal(result, exp)
def test_groupby_non_arithmetic_agg_types(self):
# GH9311, GH6620
df = pd.DataFrame([{'a': 1, 'b': 1}, {'a': 1, 'b': 2},
                   {'a': 2, 'b': 3}, {'a': 2, 'b': 4}])
dtypes = ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']
grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
           'last': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
           'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
           'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
           'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
                   'args': [1]},
           'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
                     'out_type': 'int64'}}
for dtype in dtypes:
df_in = df.copy()
df_in['b'] = df_in.b.astype(dtype)
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df_in.groupby('a')
t = getattr(grpd, method)(*data['args'])
assert_frame_equal(t, df_out)
def test_groupby_non_arithmetic_agg_intlike_precision(self):
# GH9311, GH6620
c = 24650000000000000
inputs = ((Timestamp('2011-01-15 12:50:28.502376'),
Timestamp('2011-01-20 12:50:28.593448')), (1 + c, 2 + c))
for i in inputs:
df = pd.DataFrame([{'a': 1, 'b': i[0]}, {'a': 1, 'b': i[1]}])
grp_exp = {'first': {'expected': i[0]},
'last': {'expected': i[1]},
'min': {'expected': i[0]},
'max': {'expected': i[1]},
'nth': {'expected': i[1],
'args': [1]},
'count': {'expected': 2}}
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
grpd = df.groupby('a')
res = getattr(grpd, method)(*data['args'])
self.assertEqual(res.iloc[0].b, data['expected'])
def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
self.assertTrue(issubclass(df[1].dtype.type, np.datetime64))
result = df.groupby(level=0).first()
got_dt = result[1].dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
result = df[1].groupby(level=0).first()
got_dt = result.dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = df.groupby('A')['A'].apply(lambda x: x.max())
result = df.groupby('A')['A'].max()
assert_series_equal(result, expected)
def test_groupby_datetime64_32_bit(self):
# GH 6410 / numpy 4328
# 32-bit under 1.9-dev indexing issue
df = DataFrame({"A": range(2), "B": [pd.Timestamp('2000-01-1')] * 2})
result = df.groupby("A")["B"].transform(min)
expected = Series([pd.Timestamp('2000-01-1')] * 2)
assert_series_equal(result, expected)
def test_groupby_categorical_unequal_len(self):
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
# the error is only raised with a Categorical grouper, not with a
# Series of dtype category
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
self.assertRaises(ValueError, lambda: series.groupby(bins).mean())
def test_groupby_multiindex_missing_pair(self):
# GH9049
df = DataFrame({'group1': ['a', 'a', 'a', 'b'],
'group2': ['c', 'c', 'd', 'c'],
'value': [1, 1, 1, 5]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
res = df_grouped.agg('sum')
idx = MultiIndex.from_tuples(
[('a', 'c'), ('a', 'd'), ('b', 'c')], names=['group1', 'group2'])
exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby('a').mean()
with tm.assert_produces_warning(com.PerformanceWarning):
result = not_lexsorted_df.groupby('a').mean()
tm.assert_frame_equal(expected, result)
def test_groupby_levels_and_columns(self):
# GH9344, GH9049
idx_names = ['x', 'y']
idx = pd.MultiIndex.from_tuples(
[(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx)
by_levels = df.groupby(level=idx_names).mean()
# reset_index changes columns dtype to object
by_columns = df.reset_index().groupby(idx_names).mean()
tm.assert_frame_equal(by_levels, by_columns, check_column_type=False)
by_columns.columns = pd.Index(by_columns.columns, dtype=np.int64)
tm.assert_frame_equal(by_levels, by_columns)
def test_gb_apply_list_of_unequal_len_arrays(self):
# GH1738
df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
'b', 'b', 'b'],
'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
'd', 'd', 'e'],
'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
def noddy(value, weight):
out = np.array(value * weight).repeat(3)
return out
# the kernel function returns arrays of unequal length
# pandas sniffs the first one, sees it's an array and not
# a list, and assumes the rest are of equal length
# and so tries a vstack
# don't die
df_grouped.apply(lambda x: noddy(x.value, x.weight))
def test_groupby_with_empty(self):
index = pd.DatetimeIndex(())
data = ()
series = pd.Series(data, index)
grouper = pd.tseries.resample.TimeGrouper('D')
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
def test_aaa_groupby_with_small_elem(self):
# GH 8542
# length=2
df = pd.DataFrame({'event': ['start', 'start'],
'change': [1234, 5678]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 2)
self.assertEqual(grouped.ngroups, 2)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
df = pd.DataFrame({'event': ['start', 'start', 'start'],
'change': [1234, 5678, 9123]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
'2014-09-15']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 2)
self.assertEqual(grouped.ngroups, 2)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0, 2], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
# length=3
df = pd.DataFrame({'event': ['start', 'start', 'start'],
'change': [1234, 5678, 9123]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
'2014-08-05']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 3)
self.assertEqual(grouped.ngroups, 3)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2014-08-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
res = grouped.get_group((pd.Timestamp('2014-08-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[2], :])
def test_groupby_with_timezone_selection(self):
# GH 11616
# Test that column selection returns output in correct timezone.
np.random.seed(42)
df = pd.DataFrame({
'factor': np.random.randint(0, 3, size=60),
'time': pd.date_range('01/01/2000 00:00', periods=60,
freq='s', tz='UTC')
})
df1 = df.groupby('factor').max()['time']
df2 = df.groupby('factor')['time'].max()
tm.assert_series_equal(df1, df2)
def test_timezone_info(self):
# GH 11682
# Timezone info lost when broadcasting scalar datetime to DataFrame
tm._skip_if_no_pytz()
import pytz
df = pd.DataFrame({'a': [1], 'b': [datetime.now(pytz.utc)]})
tm.assert_equal(df['b'][0].tzinfo, pytz.utc)
df = pd.DataFrame({'a': [1, 2, 3]})
df['b'] = datetime.now(pytz.utc)
tm.assert_equal(df['b'][0].tzinfo, pytz.utc)
def test_groupby_with_timegrouper(self):
# GH 4161
# TimeGrouper requires a sorted index
# also verifies that the resultant index has the correct name
import datetime as DT
df_original = DataFrame({
'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [
DT.datetime(2013, 9, 1, 13, 0),
DT.datetime(2013, 9, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 3, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 9, 2, 14, 0),
]
})
# GH 6908 change target column's order
df_reordered = df_original.sort_values(by='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
expected = DataFrame(
{'Quantity': np.nan},
index=date_range('20130901 13:00:00',
'20131205 13:00:00', freq='5D',
name='Date', closed='left'))
expected.iloc[[0, 6, 18], 0] = np.array(
[24., 6., 9.], dtype='float64')
result1 = df.resample('5D').sum()
assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result2, expected)
result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result3, expected)
def test_groupby_with_timegrouper_methods(self):
# GH 3881
# make sure API of timegrouper conforms
import datetime as DT
df_original = pd.DataFrame({
'Branch': 'A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 8, 9, 3],
'Date': [
DT.datetime(2013, 1, 1, 13, 0),
DT.datetime(2013, 1, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 12, 2, 14, 0),
]
})
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
g = df.groupby(pd.TimeGrouper('6M'))
self.assertTrue(g.group_keys)
self.assertTrue(isinstance(g.grouper, pd.core.groupby.BinGrouper))
groups = g.groups
self.assertTrue(isinstance(groups, dict))
self.assertTrue(len(groups) == 3)
def test_timegrouper_with_reg_groups(self):
# GH 3794
# allow combination of timegrouper/reg groups
import datetime as DT
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
DT.datetime(2013, 1, 1, 13, 0),
DT.datetime(2013, 1, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 12, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 12, 31, 0, 0),
DT.datetime(2013, 12, 31, 0, 0),
DT.datetime(2013, 12, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='A'), 'Buyer']).sum()
assert_frame_equal(result, expected)
expected = DataFrame({
'Buyer': 'Carl Mark Carl Joe'.split(),
'Quantity': [1, 3, 9, 18],
'Date': [
DT.datetime(2013, 1, 1, 0, 0),
DT.datetime(2013, 1, 1, 0, 0),
DT.datetime(2013, 7, 1, 0, 0),
DT.datetime(2013, 7, 1, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='6MS'), 'Buyer']).sum()
assert_frame_equal(result, expected)
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
DT.datetime(2013, 10, 1, 13, 0),
DT.datetime(2013, 10, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 2, 12, 0),
DT.datetime(2013, 10, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark Carl Joe'.split(),
'Quantity': [6, 8, 3, 4, 10],
'Date': [
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 2, 0, 0),
DT.datetime(2013, 10, 2, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='1D'), 'Buyer']).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M'), 'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 10, 31, 0, 0),
DT.datetime(2013, 10, 31, 0, 0),
DT.datetime(2013, 10, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# passing the name
df = df.reset_index()
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
with self.assertRaises(KeyError):
df.groupby([pd.Grouper(freq='1M', key='foo'), 'Buyer']).sum()
# passing the level
df = df.set_index('Date')
result = df.groupby([pd.Grouper(freq='1M', level='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', level=0), 'Buyer']).sum(
)
assert_frame_equal(result, expected)
with self.assertRaises(ValueError):
df.groupby([pd.Grouper(freq='1M', level='foo'),
'Buyer']).sum()
# multi names
df = df.copy()
df['Date'] = df.index + pd.offsets.MonthEnd(2)
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 11, 30, 0, 0),
DT.datetime(2013, 11, 30, 0, 0),
DT.datetime(2013, 11, 30, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# error as we have both a level and a name!
with self.assertRaises(ValueError):
df.groupby([pd.Grouper(freq='1M', key='Date',
level='Date'), 'Buyer']).sum()
# single groupers
expected = DataFrame({'Quantity': [31],
'Date': [DT.datetime(2013, 10, 31, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M')]).sum()
assert_frame_equal(result, expected)
expected = DataFrame({'Quantity': [31],
'Date': [DT.datetime(2013, 11, 30, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M', key='Date')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum()
assert_frame_equal(result, expected)
# GH 6764 multiple grouping with/without sort
df = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, 364, 280, 259, 201, 623, 90, 312, 359, 301,
359, 801],
'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12]
}).set_index('date')
for freq in ['D', 'M', 'A', 'Q-APR']:
expected = df.groupby('user_id')[
'whole_cost'].resample(
freq).sum().dropna().reorder_levels(
['date', 'user_id']).sortlevel().astype('int64')
expected.name = 'whole_cost'
result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq),
'user_id'])['whole_cost'].sum()
assert_series_equal(result1, expected)
result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])[
'whole_cost'].sum()
assert_series_equal(result2, expected)
def test_timegrouper_get_group(self):
# GH 6914
df_original = DataFrame({
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0), ]
})
df_reordered = df_original.sort_values(by='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M', key='Date'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
# multiple grouping
expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],
df_original.iloc[[4]]]
g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'),
('Joe', '2013-12-31')]
for df in [df_original, df_reordered]:
grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])
for (b, t), expected in zip(g_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group((b, dt))
assert_frame_equal(result, expected)
# with index
df_original = df_original.set_index('Date')
df_reordered = df_original.sort_values(by='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
def test_timegrouper_apply_return_type_series(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_series(x):
return pd.Series([x['value'].sum()], ('sum',))
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_series)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_series))
assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_timegrouper_apply_return_type_value(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_value(x):
return x.value.sum()
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_value))
assert_series_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_cumcount(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3])
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series().groupby(level=0)
e = Series(dtype='int64'
) # edge case, as this is usually considered float
assert_series_equal(e, ge.cumcount())
assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=[0] * 5)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=mi)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.ix[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
def test_fill_consistency(self):
# GH9221
# keyword arguments passed through to the generated wrapper
# are only set if the passed kw is None
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
assert_frame_equal(result, expected)
def test_index_label_overlaps_location(self):
# checking we don't have any label/location confusion in the
# wake of GH5375
df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
def test_groupby_selection_with_methods(self):
# some methods which require DatetimeIndex
rng = pd.date_range('2014', periods=len(self.df))
self.df.index = rng
g = self.df.groupby(['A'])[['C']]
g_exp = self.df[['C']].groupby(self.df['A'])
# TODO check groupby with > 1 col ?
# methods which are called as .foo()
methods = ['count',
'corr',
'cummax',
'cummin',
'cumprod',
'describe',
'rank',
'quantile',
'diff',
'shift',
'all',
'any',
'idxmin',
'idxmax',
'ffill',
'bfill',
'pct_change',
'tshift']
for m in methods:
res = getattr(g, m)()
exp = getattr(g_exp, m)()
assert_frame_equal(res, exp) # should always be frames!
# methods which aren't just .foo()
assert_frame_equal(g.fillna(0), g_exp.fillna(0))
assert_frame_equal(g.dtypes, g_exp.dtypes)
assert_frame_equal(g.apply(lambda x: x.sum()),
g_exp.apply(lambda x: x.sum()))
assert_frame_equal(g.resample('D').mean(), g_exp.resample('D').mean())
assert_frame_equal(g.resample('D').ohlc(),
g_exp.resample('D').ohlc())
assert_frame_equal(g.filter(lambda x: len(x) == 3),
g_exp.filter(lambda x: len(x) == 3))
def test_groupby_whitelist(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
df_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumsum',
'cumprod',
'cummin',
'cummax',
'cumcount',
'resample',
'describe',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'boxplot',
'hist',
'median',
'dtypes',
'corrwith',
'corr',
'cov',
'diff',
])
s_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumsum',
'cumprod',
'cummin',
'cummax',
'cumcount',
'resample',
'describe',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'hist',
'median',
'dtype',
'corr',
'cov',
'diff',
'unique',
# 'nlargest', 'nsmallest',
])
for obj, whitelist in zip((df, s), (df_whitelist, s_whitelist)):
gb = obj.groupby(df.letters)
self.assertEqual(whitelist, gb._apply_whitelist)
for m in whitelist:
getattr(type(gb), m)
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']
def test_groupby_whitelist_deprecations(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# 10711 deprecated
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').irow(0)
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').floats.irow(0)
def test_regression_whitelist_methods(self):
# GH6944
# explicitly test the whitelisted methods
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
raw_frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
raw_frame.ix[1, [1, 2]] = np.nan
raw_frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[True, False]):
if axis == 0:
frame = raw_frame
else:
frame = raw_frame.T
if op in self.AGG_FUNCTIONS_WITH_SKIPNA:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)(skipna=skipna)
expected = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
assert_frame_equal(result, expected)
else:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)()
expected = getattr(frame, op)(level=level, axis=axis)
assert_frame_equal(result, expected)
def test_groupby_blacklist(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
blacklist = [
'eval', 'query', 'abs', 'where',
'mask', 'align', 'groupby', 'clip', 'astype',
'at', 'combine', 'consolidate', 'convert_objects',
]
to_methods = [method for method in dir(df) if method.startswith('to_')]
blacklist.extend(to_methods)
# e.g., to_csv
defined_but_not_allowed = ("(?:^Cannot.+{0!r}.+{1!r}.+try using the "
"'apply' method$)")
# e.g., query, eval
not_defined = "(?:^{1!r} object has no attribute {0!r}$)"
fmt = defined_but_not_allowed + '|' + not_defined
for bl in blacklist:
for obj in (df, s):
gb = obj.groupby(df.letters)
msg = fmt.format(bl, type(gb).__name__)
with tm.assertRaisesRegexp(AttributeError, msg):
getattr(gb, bl)
def test_tab_completion(self):
grp = self.mframe.groupby(level='second')
results = set([v for v in dir(grp) if not v.startswith('_')])
expected = set(
['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter',
'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',
'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot',
'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',
'head', 'irow', 'describe', 'cummax', 'quantile', 'rank',
'cumprod', 'tail', 'resample', 'cummin', 'fillna', 'cumsum',
'cumcount', 'all', 'shift', 'skew', 'bfill', 'ffill', 'take',
'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith', 'cov',
'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding'])
self.assertEqual(results, expected)
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = _lexsort_indexer(keys, orders=True, na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# orders=True, na_position='first'
result = _lexsort_indexer(keys, orders=True, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# orders=False, na_position='last'
result = _lexsort_indexer(keys, orders=False, na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# orders=False, na_position='first'
result = _lexsort_indexer(keys, orders=False, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
# GH 2785; due to a regression in NumPy 1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError:
raise nose.SkipTest('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
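# Recall the layout of `items`: positions 0-4 and 105-109 hold NaN and
# positions 5-104 hold the values 0..99, which is why the expected orderings
# below are assembled from range(5, 105), range(5) and range(105, 110).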
# mergesort, ascending=True, na_position='last'
result = _nargsort(items, kind='mergesort', ascending=True,
na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(items, kind='mergesort', ascending=True,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(items, kind='mergesort', ascending=False,
na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(items, kind='mergesort', ascending=False,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='last'
result = _nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
def test_datetime_count(self):
df = DataFrame({'a': [1, 2, 3] * 2,
'dates': pd.date_range('now', periods=6, freq='T')})
result = df.groupby('a').dates.count()
expected = Series([
2, 2, 2
], index=Index([1, 2, 3], name='a'), name='dates')
tm.assert_series_equal(result, expected)
def test_lower_int_prec_count(self):
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception(self):
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
def test__cython_agg_general(self):
ops = [('mean', np.mean),
('median', np.median),
('var', np.var),
('add', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]), ]
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = df.groupby(labels)._cython_agg_general(op)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_cython_group_transform_algos(self):
# GH 4095
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
np.uint64, np.float32, np.float64]
ops = [(pd.algos.group_cumprod_float64, np.cumproduct, [np.float64]),
(pd.algos.group_cumsum, np.cumsum, dtypes)]
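# Each pd.algos.group_* kernel below writes its result in place: `ans` is a
# preallocated output array matching `data`, `labels` maps each row to a group,
# and `accum` holds one running accumulator row per group; the NumPy function
# in each pair is the reference that the in-place result is checked against.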
for pd_op, np_op, dtypes in ops:
for dtype in dtypes:
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
accum = np.array([[0]], dtype=dtype)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, accum)
self.assert_numpy_array_equal(np_op(data), ans[:, 0])
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
accum = np.array([[0.0]])
actual = np.zeros_like(data)
actual.fill(np.nan)
pd.algos.group_cumprod_float64(actual, data, labels, accum)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
accum = np.array([[0.0]])
actual = np.zeros_like(data)
actual.fill(np.nan)
pd.algos.group_cumsum(actual, data, labels, accum)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
accum = np.array([[0]], dtype='int64')
actual = np.zeros_like(data, dtype='int64')
pd.algos.group_cumsum(actual, data.view('int64'), labels, accum)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
self.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
def test_cython_transform(self):
# GH 4095
ops = [(('cumprod',
()), lambda x: x.cumprod()), (('cumsum', ()),
lambda x: x.cumsum()),
(('shift', (-1, )),
lambda x: x.shift(-1)), (('shift',
(1, )), lambda x: x.shift())]
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for (op, args), targop in ops:
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(expected,
data.groupby(labels).transform(op,
*args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50})
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
# bit a of hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_selection_from_grouper()
for (op, args), targop in ops:
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(expected, getattr(gb, op)(*args))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
self.assertRaises(DataError, gb[c].transform, op)
self.assertRaises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_groupby_cumprod(self):
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if the product overflows, groupby casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general(self):
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug(self):
raw = """,Date,app,File
2013-04-23,2013-04-23 00:00:00,,log080001.log
2013-05-06,2013-05-06 00:00:00,,log.log
2013-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
self.assertFalse(r['File'].isnull().any())
def test_nlargest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
assert_series_equal(gb.nlargest(3, keep='last'), e)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(gb.nlargest(3, take_last=True), e)
def test_nsmallest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
assert_series_equal(gb.nsmallest(3, keep='last'), e)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(gb.nsmallest(3, take_last=True), e)
def test_transform_doesnt_clobber_ints(self):
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
gb = df.groupby('a')
result = gb.transform('mean')
gb2 = df2.groupby('a')
expected = gb2.transform('mean')
tm.assert_frame_equal(result, expected)
def test_groupby_categorical_two_columns(self):
# https://github.com/pydata/pandas/issues/8138
d = {'cat':
pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
'ints': [1, 1, 2, 2],
'val': [10, 20, 30, 40]}
test = pd.DataFrame(d)
# Grouping on a single column
groups_single_key = test.groupby("cat")
res = groups_single_key.agg('mean')
exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]},
index=pd.CategoricalIndex(["a", "b", "c"], name="cat"))
tm.assert_frame_equal(res, exp)
# Grouping on two columns
groups_double_key = test.groupby(["cat", "ints"])
res = groups_double_key.agg('mean')
exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan],
"cat": ["a", "a", "b", "b", "c", "c"],
"ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints"
])
tm.assert_frame_equal(res, exp)
# GH 10132
for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = test[(test.cat == c) & (test.ints == i)]
assert_frame_equal(result, expected)
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
test = pd.DataFrame(d)
values = pd.cut(test['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = test.groupby([values, 'C2'])
res = groups_double_key.agg('mean')
nan = np.nan
idx = MultiIndex.from_product([["(1, 2]", "(2, 3]", "(3, 6]"],
[1, 2, 3, 4]],
names=["cat", "C2"])
exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3,
nan, nan, nan, nan, 4, 5],
"C3": [nan, nan, nan, nan, 10, 100,
nan, nan, nan, nan, 200, 34]}, index=idx)
tm.assert_frame_equal(res, exp)
def test_groupby_apply_all_none(self):
# Tests to make sure no errors are raised if the apply function returns
# all None values. Issue 9684.
test_df = DataFrame({'groups': [0, 0, 1, 1],
'random_vars': [8, 7, 4, 5]})
def test_func(x):
pass
result = test_df.groupby('groups').apply(test_func)
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_first_last_max_min_on_time_data(self):
# GH 10295
# Verify that NaT is not in the result of max, min, first and last on
# a DataFrame with datetime or timedelta values.
from datetime import timedelta as td
df_test = DataFrame(
{'dt': [nan, '2015-07-24 10:10', '2015-07-25 11:11',
'2015-07-23 12:12', nan],
'td': [nan, td(days=1), td(days=2), td(days=3), nan]})
df_test.dt = pd.to_datetime(df_test.dt)
df_test['group'] = 'A'
df_ref = df_test[df_test.dt.notnull()]
grouped_test = df_test.groupby('group')
grouped_ref = df_ref.groupby('group')
assert_frame_equal(grouped_ref.max(), grouped_test.max())
assert_frame_equal(grouped_ref.min(), grouped_test.min())
assert_frame_equal(grouped_ref.first(), grouped_test.first())
assert_frame_equal(grouped_ref.last(), grouped_test.last())
def test_groupby_preserves_sort(self):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{'int_groups': [3, 1, 0, 1, 0, 3, 3, 3],
'string_groups': ['z', 'a', 'z', 'a', 'a', 'g', 'g', 'g'],
'ints': [8, 7, 4, 5, 2, 9, 1, 1],
'floats': [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
'strings': ['z', 'd', 'a', 'e', 'word', 'word2', '42', '47']})
# Try sorting on different types and with different group types
for sort_column in ['ints', 'floats', 'strings', ['ints', 'floats'],
['ints', 'strings']]:
for group_column in ['int_groups', 'string_groups',
['int_groups', 'string_groups']]:
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_nunique_with_object(self):
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_transform_with_non_scalar_group(self):
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
self.assertRaisesRegexp(ValueError, 'transform must return a scalar '
'value for each group.*', df.groupby
(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
def test_decons():
from pandas.core.groupby import decons_group_index, get_group_index
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
assert (np.array_equal(a, b))
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile(
[0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile(
[5, 1, 0, 2, 3, 0, 5, 4], 100)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)]
testit(label_list, shape)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s'
], exit=False)
| mit |
DiracInstitute/kbmod | analysis/trajectoryFiltering.py | 1 | 8204 | from sklearn.cluster import DBSCAN
import numpy as np
import operator as op
from functools import reduce
def ncr(n, r):
r = min(r, n-r)
if r == 0: return 1
numer = reduce(op.mul, range(n, n-r, -1))
denom = reduce(op.mul, range(1, r+1))
return numer//denom
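# ncr(n, r) is the binomial coefficient "n choose r" computed with integer
# arithmetic only, e.g. ncr(10, 3) == 120 and ncr(5, 0) == 1.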
def maximum_expected_detections(im_count, min_obs,
mask_amount, actual_expected):
max_expected_fraction = 0
for masked_count in range(im_count-min_obs+1):
max_expected_fraction += ncr(im_count, masked_count) * \
(mask_amount)**masked_count * \
(1-mask_amount)**(im_count-masked_count)
return max_expected_fraction*actual_expected
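# The loop above accumulates the binomial probability that at most
# (im_count - min_obs) of an object's observations fall on masked pixels,
# i.e. P(X <= im_count - min_obs) with X ~ Binomial(im_count, mask_amount);
# scaling by actual_expected gives the ceiling on recoverable detections.
# With purely illustrative numbers, 10 images, min_obs of 7, a 10% mask
# fraction and 100 injected objects, this comes out to roughly 98.7:
#   maximum_expected_detections(10, 7, 0.1, 100)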
def add_trajectory(image_list, tr, psf, times):
init_time = times[0]
for i,t_on in zip(image_list, times):
t = t_on - init_time
i.add_object( tr.x+tr.x_v*t, tr.y+tr.y_v*t, tr.flux, psf )
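# add_trajectory injects a synthetic moving source: each image receives the
# object at the position predicted from the trajectory's starting pixel and
# velocity at that image's time offset. Sketch (image_list, psf, times and a
# trajectory object trj with x, y, x_v, y_v and flux are assumed to come from
# the surrounding pipeline):
#   add_trajectory(image_list, trj, psf, times)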
def compare_trajectory(a, b, v_thresh, pix_thresh):
# compare flux too?
if (b.obs_count == 0 and
abs(a.x-b.x)<=pix_thresh and
abs(a.y-b.y)<=pix_thresh and
abs(a.x_v/b.x_v-1)<v_thresh and
abs(a.y_v/b.y_v-1)<v_thresh):
b.obs_count += 1
return True
else:
return False
def compare_trajectory_once(a, b, v_thresh, pix_thresh):
# compare flux too?
if (
abs(a.x-b.x)<=pix_thresh and
abs(a.y-b.y)<=pix_thresh and
abs(a.x_v/b.x_v-1)<v_thresh and
abs(a.y_v/b.y_v-1)<v_thresh):
return True
else:
return False
def match_trajectories(results_list, test_list, v_thresh, pix_thresh):
matches = []
unmatched = []
for r in results_list:
if any(compare_trajectory(r, test, v_thresh, pix_thresh)
for test in test_list):
matches.append(r)
for t in test_list:
if (t.obs_count == 0):
unmatched.append(t)
t.obs_count = 0
return matches, unmatched
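# Usage sketch (object names are placeholders): given `results` from the search
# and `injected` trajectories added with add_trajectory, recover which injected
# objects were found within 2 pixels and a 5% velocity tolerance:
#   found, missed = match_trajectories(results, injected,
#                                      v_thresh=0.05, pix_thresh=2)
# Note that compare_trajectory counts matches via obs_count on the test
# trajectories, which match_trajectories resets to 0 before returning.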
# adapted from analyzeImage.py
def cluster_trajectories( results, dbscan_args=None):
"""
Use the scikit-learn implementation of density-based spatial clustering of
applications with noise (DBSCAN)
(http://scikit-learn.org/stable/modules/generated/
sklearn.cluster.DBSCAN.html)
to cluster the results of the likelihood image search using the starting
location and the x,y velocity of each trajectory.
Parameters
----------
results: numpy recarray, required
The results output from findObjects in searchImage.
dbscan_args: dict, optional
Additional arguments for the DBSCAN instance. See options in link
above.
Returns
-------
db_cluster: DBSCAN instance
DBSCAN instance with clustering completed. To get cluster labels use
db_cluster.labels_
top_vals: list of integers
The indices in the results array where the most likely object in each
cluster is located.
"""
default_dbscan_args = dict(eps=0.1, min_samples=1)
if dbscan_args is not None:
default_dbscan_args.update(dbscan_args)
dbscan_args = default_dbscan_args
t0x_arr = []
t0y_arr = []
vel_x_arr = []
vel_y_arr = []
for r in results:
t0x_arr.append(r.x)
t0y_arr.append(r.y)
vel_x_arr.append(r.x_v)
vel_y_arr.append(r.y_v)
db_cluster = DBSCAN(**dbscan_args)
scaled_t0x = t0x_arr - np.min(t0x_arr)
if np.max(scaled_t0x) > 0.:
scaled_t0x = scaled_t0x/np.max(scaled_t0x)
scaled_t0y = t0y_arr - np.min(t0y_arr)
if np.max(scaled_t0y) > 0.:
scaled_t0y = scaled_t0y/np.max(scaled_t0y)
scaled_vx = vel_x_arr - np.min(vel_x_arr)
if np.max(scaled_vx) > 0.:
scaled_vx /= np.max(scaled_vx)
scaled_vy = vel_y_arr - np.min(vel_y_arr)
if np.max(scaled_vy) > 0.:
scaled_vy /= np.max(scaled_vy)
db_cluster.fit(np.array([scaled_t0x, scaled_t0y,
scaled_vx, scaled_vy
], dtype=np.float).T)
top_vals = []
for cluster_num in np.unique(db_cluster.labels_):
cluster_vals = np.where(db_cluster.labels_ == cluster_num)[0]
top_vals.append(cluster_vals[0])
return db_cluster, top_vals
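# Usage sketch (hypothetical search output): cluster near-duplicate results and
# keep one representative per cluster; the eps shown is only an illustration of
# overriding the default of eps=0.1, min_samples=1 on the rescaled features.
#   db, top_vals = cluster_trajectories(results, dbscan_args=dict(eps=0.05))
#   best_per_cluster = [results[i] for i in top_vals]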
def calc_centers(t, timeArr):
#ix, iy = zip(*[(t.x,t.y) for t in trajectories] )
#xv, yv = zip(*[(t.x_v, t.y_v) for t in trajectories] )
#startLocArr = np.array( [np.array(ix), np.array(iy)] )
#velArr = np.array( [np.array(xv), np.array(yv)] )
startLocArr = np.array( [t.x, t.y] )
#print(startLocArr)
velArr = np.array( [t.x_v, t.y_v] )
centerArr = []
for time in timeArr:
centerArr.append(startLocArr + (velArr*time))
return np.array(centerArr)
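# Worked example: for a trajectory starting at pixel (10, 20) with velocity
# (2, -1) pixels per unit time, calc_centers(t, [0, 1, 2]) returns
# [[10, 20], [12, 19], [14, 18]] -- one predicted (x, y) center per epoch.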
def create_postage_stamp(imageArray, traj,
timeArr, stamp_width):
"""
Create postage stamp image coadds of potential objects traveling along
a trajectory.
Parameters
----------
imageArray: numpy array, required
The masked input images.
traj: trajectory, required
The candidate trajectory; its starting pixel location and x,y velocity
in pixels/hr. are used to predict the object position in each image.
timeArr: numpy array, required
The time in hours of each image starting from 0 at the first image.
stamp_width: numpy array or list, [2], required
The row, column dimensions of the desired output image.
Returns
-------
stampImage: numpy array
The coadded postage stamp.
singleImagesArray: numpy array
The postage stamps that were added together to create the coadd.
"""
singleImagesArray = []
stampWidth = np.array(stamp_width, dtype=int)
#print stampWidth
stampImage = np.zeros(stampWidth)
#if len(np.shape(imageArray)) < 3:
# imageArray = [imageArray]
measureCoords = calc_centers(traj, timeArr)
#print(measureCoords)
if len(np.shape(measureCoords)) < 2:
measureCoords = [measureCoords]
off_edge = []
for centerCoords in measureCoords:
#print((centerCoords[0] + stampWidth[0]/2 + 1) )
#print( np.shape(imageArray[0])[1])
if (centerCoords[0] + stampWidth[0]/2 + 1) > np.shape(imageArray[0])[1]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[0] - stampWidth[0]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] + stampWidth[1]/2 + 1) > np.shape(imageArray[0])[0]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] - stampWidth[1]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
else:
off_edge.append(False)
i=0
for image in imageArray:
if off_edge[i] is False:
xmin = int(np.rint(measureCoords[i,1]-stampWidth[0]/2))
xmax = int(xmin + stampWidth[0])
ymin = int(np.rint(measureCoords[i,0]-stampWidth[1]/2))
ymax = int(ymin + stampWidth[1])
#print xmin, xmax, ymin, ymax
single_stamp = image[xmin:xmax, ymin:ymax]
single_stamp[np.isnan(single_stamp)] = 0.
single_stamp[np.isinf(single_stamp)] = 0.
single_stamp[single_stamp < -9000.] = 0.
#if len(np.where(single_stamp == 0.)[0]) > 221.:
# singleImagesArray.append(single_stamp)
# continue
stampImage += single_stamp
singleImagesArray.append(single_stamp)
else:
single_stamp = np.zeros((stampWidth))
singleImagesArray.append(single_stamp)
i+=1
return stampImage, singleImagesArray
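# Hedged usage sketch (not part of the original module): coadd three synthetic
# 64x64 images along a trajectory moving 2 px/hr in x and 1 px/hr in y, using
# a 21x21 pixel stamp. All numbers are illustrative.
def _example_create_postage_stamp():
    class _Traj(object):
        x, y, x_v, y_v = 30.0, 30.0, 2.0, 1.0
    images = [np.random.randn(64, 64) for _ in range(3)]
    times = np.array([0.0, 1.0, 2.0])
    stamp, singles = create_postage_stamp(images, _Traj(), times, [21, 21])
    return stamp.shape, len(singles)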
| bsd-2-clause |
DANA-Laboratory/CoolProp | wrappers/Python/CoolProp/GUI/CoolPropGUI.py | 4 | 17019 | import wx
import wx.grid
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as WXCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as WXToolbar
import matplotlib as mpl
import CoolProp as CP
from CoolProp.Plots.Plots import Ph, Ts
from CoolProp.Plots import PsychChart
import numpy as np
# Munge the system path if necessary to add the lib folder (only really needed
# for packaging using cx_Freeze)
#if os.path.exists('lib') and os.path.abspath(os.path.join(os.curdir,'lib')) not in os.:
class PlotPanel(wx.Panel):
def __init__(self, parent, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
sizer = wx.BoxSizer(wx.VERTICAL)
self.figure = mpl.figure.Figure(dpi=100)
self.canvas = WXCanvas(self, -1, self.figure)
self.ax = self.figure.add_axes((0.15,0.15,0.8,0.8))
#self.toolbar = WXToolbar(self.canvas)
#self.toolbar.Realize()
sizer.Add(self.canvas,1,wx.EXPAND)
#sizer.Add(self.toolbar)
self.SetSizer(sizer)
sizer.Layout()
class TSPlotFrame(wx.Frame):
def __init__(self, Fluid):
wx.Frame.__init__(self, None,title='T-s plot: '+Fluid)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.PP = PlotPanel(self, size = (-1,-1))
sizer.Add(self.PP, 1, wx.EXPAND)
self.SetSizer(sizer)
Ts(str(Fluid),
axis = self.PP.ax,
Tmin = CP.CoolProp.Props(str(Fluid),'Ttriple')+0.01)
sizer.Layout()
self.add_menu()
def add_menu(self):
# Menu Bar
self.MenuBar = wx.MenuBar()
self.File = wx.Menu()
mnuItem = wx.MenuItem(self.File, -1, "Edit...", "", wx.ITEM_NORMAL)
self.File.AppendItem(mnuItem)
self.MenuBar.Append(self.File, "File")
self.SetMenuBar(self.MenuBar)
class PsychOptions(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent)
self.build_contents()
self.layout()
def build_contents(self):
self.p_label = wx.StaticText(self,label='Pressure [kPa (absolute)]')
self.p = wx.TextCtrl(self,value = '101.325')
self.Tmin_label = wx.StaticText(self,label='Minimum dry bulb temperature [\xb0 C]')
self.Tmin = wx.TextCtrl(self,value = '-10')
self.Tmax_label = wx.StaticText(self,label='Maximum dry bulb temperature [\xb0 C]')
self.Tmax = wx.TextCtrl(self,value = '60')
self.GoButton = wx.Button(self,label='Accept')
self.GoButton.Bind(wx.EVT_BUTTON,self.OnAccept)
def OnAccept(self, event):
self.EndModal(wx.ID_OK)
def layout(self):
sizer = wx.FlexGridSizer(cols = 2)
sizer.AddMany([self.p_label,self.p,self.Tmin_label,self.Tmin,self.Tmax_label,self.Tmax])
sizer.Add(self.GoButton)
sizer.Layout()
self.Fit()
class PsychPlotFrame(wx.Frame):
def __init__(self,Tmin = 263.15,Tmax=333.15,p = 101.325, **kwargs):
wx.Frame.__init__(self, None, title='Psychrometric plot', **kwargs)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.PP = PlotPanel(self)
self.PP.figure.delaxes(self.PP.ax)
self.PP.ax = self.PP.figure.add_axes((0.1,0.1,0.85,0.85))
sizer.Add(self.PP, 1, wx.EXPAND)
self.SetSizer(sizer)
PsychChart.p = p
PsychChart.Tdb = np.linspace(Tmin,Tmax)
SL = PsychChart.SaturationLine()
SL.plot(self.PP.ax)
RHL = PsychChart.HumidityLines([0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
RHL.plot(self.PP.ax)
HL = PsychChart.EnthalpyLines(range(-20,100,10))
HL.plot(self.PP.ax)
PF = PsychChart.PlotFormatting()
PF.plot(self.PP.ax)
sizer.Layout()
self.add_menu()
self.PP.toolbar = WXToolbar(self.PP.canvas)
self.PP.toolbar.Realize()
self.PP.GetSizer().Add(self.PP.toolbar)
self.PP.Layout()
def add_menu(self):
# Menu Bar
self.MenuBar = wx.MenuBar()
self.File = wx.Menu()
mnuItem = wx.MenuItem(self.File, -1, "Edit...", "", wx.ITEM_NORMAL)
self.File.AppendItem(mnuItem)
self.MenuBar.Append(self.File, "File")
self.SetMenuBar(self.MenuBar)
class PHPlotFrame(wx.Frame):
def __init__(self, Fluid):
wx.Frame.__init__(self, None,title='p-h plot: '+Fluid)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.PP = PlotPanel(self, size = (-1,-1))
sizer.Add(self.PP, 1, wx.EXPAND)
self.SetSizer(sizer)
Ph(str(Fluid),
axis = self.PP.ax,
Tmin = CP.CoolProp.Props(str(Fluid),'Ttriple')+0.01)
sizer.Layout()
self.add_menu()
def add_menu(self):
# Menu Bar
self.MenuBar = wx.MenuBar()
self.File = wx.Menu()
mnuItem = wx.MenuItem(self.File, -1, "Edit...", "", wx.ITEM_NORMAL)
self.File.AppendItem(mnuItem)
self.MenuBar.Append(self.File, "File")
self.SetMenuBar(self.MenuBar)
def overlay_points(self):
pass
def overlay_cycle(self):
pass
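def _example_show_ph_plot(Fluid='R134a'):
    """Hedged usage sketch (not part of the original GUI): open a single p-h
    plot window for one fluid without going through MainFrame. Assumes
    wxPython, matplotlib and CoolProp are installed; 'R134a' is only an
    illustrative fluid name."""
    app = wx.App(False)
    frame = PHPlotFrame(Fluid)
    frame.Show()
    app.MainLoop()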
class SimpleGrid(wx.grid.Grid):
def __init__(self, parent, ncol = 20, nrow = 8):
wx.grid.Grid.__init__(self, parent)
self.CreateGrid(ncol, nrow)
[self.SetCellValue(i, j, '0.0') for i in range(ncol) for j in range(nrow)]
class SaturationTableDialog(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self,parent)
self.FluidLabel = wx.StaticText(self,label = "Fluid")
self.FluidCombo = wx.ComboBox(self)
self.FluidCombo.AppendItems(sorted(CP.__fluids__))
self.FluidCombo.SetEditable(False)
self.TtripleLabel = wx.StaticText(self,label = "Triple Point Temperature [K]")
self.TtripleValue = wx.TextCtrl(self)
self.TtripleValue.Enable(False)
self.TcritLabel = wx.StaticText(self,label = "Critical Temperature [K]")
self.TcritValue = wx.TextCtrl(self)
self.TcritValue.Enable(False)
self.NvalsLabel = wx.StaticText(self,label = "Number of values")
self.NvalsValue = wx.TextCtrl(self)
self.TminLabel = wx.StaticText(self,label = "Minimum Temperature [K]")
self.TminValue = wx.TextCtrl(self)
self.TmaxLabel = wx.StaticText(self,label = "Maximum Temperature [K]")
self.TmaxValue = wx.TextCtrl(self)
self.Accept = wx.Button(self, label ="Accept")
sizer = wx.FlexGridSizer(cols = 2)
sizer.AddMany([self.FluidLabel,self.FluidCombo,
self.TtripleLabel,self.TtripleValue,
self.TcritLabel, self.TcritValue])
sizer.AddSpacer(10)
sizer.AddSpacer(10)
sizer.AddMany([self.NvalsLabel,self.NvalsValue,
self.TminLabel, self.TminValue,
self.TmaxLabel, self.TmaxValue])
sizer.Add(self.Accept)
self.Bind(wx.EVT_COMBOBOX, self.OnSelectFluid)
self.Bind(wx.EVT_BUTTON, self.OnAccept)
self.SetSizer(sizer)
sizer.Layout()
self.Fit()
#Bind a key-press event to all objects to get Esc
children = self.GetChildren()
for child in children:
child.Bind(wx.EVT_KEY_UP, self.OnKeyPress)
def OnKeyPress(self,event = None):
""" cancel if Escape key is pressed """
event.Skip()
if event.GetKeyCode() == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
def get_values(self):
Fluid = str(self.FluidCombo.GetStringSelection())
if Fluid:
N = float(self.NvalsValue.GetValue())
Tmin = float(self.TminValue.GetValue())
Tmax = float(self.TmaxValue.GetValue())
Tvals = np.linspace(Tmin, Tmax, N)
return Fluid, Tvals
else:
return '',[]
def OnCheckTmin(self):
pass
def OnCheckTmax(self):
pass
def OnAccept(self, event = None):
self.EndModal(wx.ID_OK)
def OnSelectFluid(self, event = None):
Fluid = str(self.FluidCombo.GetStringSelection())
if Fluid:
Tcrit = CP.CoolProp.Props(Fluid,'Tcrit')
Ttriple = CP.CoolProp.Props(Fluid,'Ttriple')
self.TcritValue.SetValue(str(Tcrit))
self.TtripleValue.SetValue(str(Ttriple))
self.NvalsValue.SetValue('100')
self.TminValue.SetValue(str(Ttriple + 0.01))
self.TmaxValue.SetValue(str(Tcrit - 0.01))
class SaturationTable(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent)
self.Fluid, self.Tvals = self.OnSelect()
if self.Fluid:
self.tbl = SimpleGrid(self,
ncol = len(self.Tvals)
)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.tbl,1,wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
self.build()
self.add_menu()
else:
self.Destroy()
def OnSelect(self, event = None):
dlg = SaturationTableDialog(None)
if dlg.ShowModal() == wx.ID_OK:
Fluid,Tvals = dlg.get_values()
cancel = False
else:
cancel = True
dlg.Destroy()
if not cancel:
return Fluid,Tvals
else:
return None,None
def build(self):
self.SetTitle('Saturation Table: '+self.Fluid)
self.tbl.SetColLabelValue(0, "Temperature\n[K]")
self.tbl.SetColLabelValue(1, "Liquid Pressure\n[kPa]")
self.tbl.SetColLabelValue(2, "Vapor Pressure\n[kPa]")
self.tbl.SetColLabelValue(3, "Liquid Density\n[kg/m3]")
self.tbl.SetColLabelValue(4, "Vapor Density\n[kg/m3]")
for i,T in enumerate(self.Tvals):
Fluid = self.Fluid
pL = CP.CoolProp.Props('P','T',T,'Q',0,Fluid)
pV = CP.CoolProp.Props('P','T',T,'Q',1,Fluid)
rhoL = CP.CoolProp.Props('D','T',T,'Q',0,Fluid)
rhoV = CP.CoolProp.Props('D','T',T,'Q',1,Fluid)
self.tbl.SetCellValue(i,0,str(T))
self.tbl.SetCellValue(i,1,str(pL))
self.tbl.SetCellValue(i,2,str(pV))
self.tbl.SetCellValue(i,3,str(rhoL))
self.tbl.SetCellValue(i,4,str(rhoV))
def add_menu(self):
# Menu Bar
self.MenuBar = wx.MenuBar()
self.File = wx.Menu()
mnuItem0 = wx.MenuItem(self.File, -1, "Select All \tCtrl+A", "", wx.ITEM_NORMAL)
mnuItem1 = wx.MenuItem(self.File, -1, "Copy selected data \tCtrl+C", "", wx.ITEM_NORMAL)
mnuItem2 = wx.MenuItem(self.File, -1, "Copy table w/ headers \tCtrl+H", "", wx.ITEM_NORMAL)
self.File.AppendItem(mnuItem0)
self.File.AppendItem(mnuItem1)
self.File.AppendItem(mnuItem2)
self.MenuBar.Append(self.File, "Edit")
self.Bind(wx.EVT_MENU, lambda event: self.tbl.SelectAll(), mnuItem0)
self.Bind(wx.EVT_MENU, self.OnCopy, mnuItem1)
self.Bind(wx.EVT_MENU, self.OnCopyHeaders, mnuItem2)
self.SetMenuBar(self.MenuBar)
def OnCopy(self, event = None):
# Number of rows and cols
rows = self.tbl.GetSelectionBlockBottomRight()[0][0] - self.tbl.GetSelectionBlockTopLeft()[0][0] + 1
cols = self.tbl.GetSelectionBlockBottomRight()[0][1] - self.tbl.GetSelectionBlockTopLeft()[0][1] + 1
# data variable contain text that must be set in the clipboard
data = ''
# For each cell in selected range append the cell value in the data variable
# Tabs '\t' for cols and newlines '\n' for rows
for r in range(rows):
for c in range(cols):
data = data + str(self.tbl.GetCellValue(self.tbl.GetSelectionBlockTopLeft()[0][0] + r, self.tbl.GetSelectionBlockTopLeft()[0][1] + c))
if c < cols - 1:
data = data + '\t'
data = data + '\n'
# Create text data object
clipboard = wx.TextDataObject()
# Set data object value
clipboard.SetText(data)
# Put the data in the clipboard
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(clipboard)
wx.TheClipboard.Close()
else:
wx.MessageBox("Can't open the clipboard", "Error")
event.Skip()
def OnCopyHeaders(self, event = None):
self.tbl.SelectAll()
# Number of rows and cols
rows = self.tbl.GetSelectionBlockBottomRight()[0][0] - self.tbl.GetSelectionBlockTopLeft()[0][0] + 1
cols = self.tbl.GetSelectionBlockBottomRight()[0][1] - self.tbl.GetSelectionBlockTopLeft()[0][1] + 1
# data variable contain text that must be set in the clipboard
data = ''
#Add the headers
for c in range(cols):
data += str(self.tbl.GetColLabelValue(c).replace('\n',' ') )
if c < cols - 1:
data += '\t'
data = data + '\n'
# For each cell in selected range append the cell value in the data variable
# Tabs '\t' for cols and newlines '\n' for rows
for r in range(rows):
for c in range(cols):
data = data + str(self.tbl.GetCellValue(self.tbl.GetSelectionBlockTopLeft()[0][0] + r, self.tbl.GetSelectionBlockTopLeft()[0][1] + c))
if c < cols - 1:
data = data + '\t'
data = data + '\n'
# Create text data object
clipboard = wx.TextDataObject()
# Set data object value
clipboard.SetText(data)
# Put the data in the clipboard
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(clipboard)
wx.TheClipboard.Close()
else:
wx.MessageBox("Can't open the clipboard", "Error")
event.Skip()
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None)
self.build()
def build(self):
# Menu Bar
self.MenuBar = wx.MenuBar()
self.plots = wx.Menu()
self.PHPlot = wx.Menu()
self.TSPlot = wx.Menu()
self.tables = wx.Menu()
self.PsychPlot = wx.MenuItem(self.plots,-1,'Psychrometric Plot')
self.SatTable = wx.MenuItem(self.tables, -1,' Saturation Table', "", wx.ITEM_NORMAL)
for Fluid in sorted(CP.__fluids__):
mnuItem = wx.MenuItem(self.PHPlot, -1, Fluid, "", wx.ITEM_NORMAL)
self.PHPlot.AppendItem(mnuItem)
self.Bind(wx.EVT_MENU, lambda event: self.OnPHPlot(event, mnuItem), mnuItem)
mnuItem = wx.MenuItem(self.TSPlot, -1, Fluid, "", wx.ITEM_NORMAL)
self.TSPlot.AppendItem(mnuItem)
self.Bind(wx.EVT_MENU, lambda event: self.OnTSPlot(event, mnuItem), mnuItem)
self.MenuBar.Append(self.plots, "Plots")
self.plots.AppendItem(self.PsychPlot)
self.plots.AppendMenu(-1,'p-h plot', self.PHPlot)
self.plots.AppendMenu(-1,'T-s plot', self.TSPlot)
self.MenuBar.Append(self.tables, "Tables")
self.tables.AppendItem(self.SatTable)
self.Bind(wx.EVT_MENU, self.OnSatTable, self.SatTable)
self.Bind(wx.EVT_MENU, self.OnPsychPlot, self.PsychPlot)
self.SetMenuBar(self.MenuBar)
def OnPsychPlot(self, event=None):
#Load the options
dlg = PsychOptions(None)
if dlg.ShowModal() == wx.ID_OK:
Tmin = float(dlg.Tmin.GetValue())+273.15
Tmax = float(dlg.Tmax.GetValue())+273.15
p = float(dlg.p.GetValue())
PPF = PsychPlotFrame(Tmin = Tmin, Tmax = Tmax, p = p, size = (1000,700))
PPF.Show()
dlg.Destroy()
def OnSatTable(self,event):
TBL = SaturationTable(None)
TBL.Show()
def OnPHPlot(self, event, mnuItem):
#Make a p-h plot instance in a new frame
#Get the label (Fluid name)
Fluid = self.PHPlot.FindItemById(event.Id).Label
PH = PHPlotFrame(Fluid)
PH.Show()
def OnTSPlot(self, event, mnuItem):
#Make a T-s plot instance in a new frame
#Get the label (Fluid name)
Fluid = self.TSPlot.FindItemById(event.Id).Label
TS = TSPlotFrame(Fluid)
TS.Show()
if __name__=='__main__':
app = wx.App(False)
wx.InitAllImageHandlers()
frame = MainFrame()
frame.Show(True)
app.MainLoop()
| mit |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric or numarray based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
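def _numerix_shim_example():
    """Hedged sketch (not part of the original module): calling code can stay
    backend-agnostic by going through the shims defined above instead of
    touching backend-specific array attributes directly."""
    a = array([1.0, 2.0, 3.0])
    return typecode(a), iscontiguous(a), itemsize(a)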
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| agpl-3.0 |
abimannans/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# License: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs=1 here; set n_jobs=-1 to use all
# available CPUs on the machine)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient so that the segmentation is close to a Voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
sfcta/synthpop | demos/sfcta/synthesize_sfcta.py | 2 | 4155 |
# coding: utf-8
from sfcta_starter import SFCTAStarter
from sfcta_starter_hh import SFCTAStarterHouseholds
from sfcta_starter_gq import SFCTAStarterGroupQuarters
from synthpop.synthesizer import synthesize_all, enable_logging
import pandas as pd
import argparse
import os
import re
import sys
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Synthesize population given SFCTA-formatted input files.')
# parser.add_argument('census_api_key', help="Census API key.")
parser.add_argument('PUMA_data_dir', help="Location of PUMA data. E.g. Q:\Model Development\Population Synthesizer\2. Base Year Eval\PopGen input from ACS20082012\by_st_puma10")
parser.add_argument('fips_file', help="Census FIPS (Federal Information Processing Standards) file. Probably Q:\Data\Surveys\Census\PUMS&PUMA\national_county.txt")
parser.add_argument('controls_csv', help="Controls CSV file. Probably output by createControls.py in Q:\Model Development\Population Synthesizer\pythonlib")
parser.add_argument('--tazlist', help="A list of TAZs for which to synthesize the population. Comma-delimited, ranges ok. e.g. 1-10,12,20-30")
parser_args = parser.parse_args()
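# Illustrative invocation (paths are placeholders, not verified):
#   python synthesize_sfcta.py <PUMA_data_dir> national_county.txt controls.csv --tazlist 1-10,12,20-30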
# This needs to end in a \
if parser_args.PUMA_data_dir[-1] != "\\":
parser_args.PUMA_data_dir = parser_args.PUMA_data_dir + "\\"
# No census API key needed since the files are local -- set it to a dummy
parser_args.census_api_key = "this_is_unused"
print "census_api_key = [%s]" % parser_args.census_api_key
print "PUMA_data_dir = [%s]" % parser_args.PUMA_data_dir
print "fips_file = [%s]" % parser_args.fips_file
print "controls_csv = [%s]" % parser_args.controls_csv
print "tazlist = [%s]" % parser_args.tazlist
# parse the TAZ set
taz_set = set()
if parser_args.tazlist != None:
range_re = re.compile("^(\d+)(\-(\d+))?$")
tazlist_str = parser_args.tazlist.split(",")
for taz_str in tazlist_str:
# each element must be either an int or a range
match = re.match(range_re, taz_str)
if match == None:
print "Don't understand tazlist argument '%s'" % parser_args.tazlist
print parser.format_help()
sys.exit(2)
if match.group(3) == None:
taz_set.add(int(match.group(1)))
else:
assert(int(match.group(3)) > int(match.group(1)))
taz_set.update(range(int(match.group(1)), int(match.group(3))+1))
print "taz_set = [%s]" % str(taz_set)
# enable_logging()
starter_hh = SFCTAStarterHouseholds(parser_args.census_api_key,
parser_args.controls_csv, taz_set,
parser_args.PUMA_data_dir, parser_args.fips_file,
write_households_csv="households.csv",
write_persons_csv="persons.csv")
households, people, fit_quality = synthesize_all(starter_hh, indexes=None)
gq_start_hhid = starter_hh.start_hhid
gq_start_persid = starter_hh.start_persid
# close the file
del starter_hh
starter_gq = SFCTAStarterGroupQuarters(parser_args.census_api_key,
parser_args.controls_csv, taz_set,
parser_args.PUMA_data_dir, parser_args.fips_file,
write_households_csv="households.csv",
write_persons_csv="persons.csv",
write_append=True,
start_hhid=gq_start_hhid,
start_persid=gq_start_persid)
households_gq, people_gq, fit_quality_gq = synthesize_all(starter_gq, indexes=None)
# close the file
del starter_gq
sys.exit()
for geo, qual in fit_quality.items():
print 'Geography: {}'.format(geo[0])
# print ' household chisq: {}'.format(qual.household_chisq)
# print ' household p: {}'.format(qual.household_p)
print ' people chisq: {}'.format(qual.people_chisq)
print ' people p: {}'.format(qual.people_p)
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/regression/tests/test_lme.py | 19 | 25081 | import warnings
import numpy as np
import pandas as pd
from statsmodels.regression.mixed_linear_model import MixedLM, MixedLMParams
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
dec, assert_)
from . import lme_r_results
from statsmodels.base import _penalties as penalties
import statsmodels.tools.numdiff as nd
import os
import csv
# TODO: add tests with unequal group sizes
class R_Results(object):
"""
A class for holding various results obtained from fitting one data
set using lmer in R.
Parameters
----------
meth : string
Either "ml" or "reml".
irfs : string
Either "irf", for independent random effects, or "drf" for
dependent random effects.
ds_ix : integer
The number of the data set
"""
def __init__(self, meth, irfs, ds_ix):
bname = "_%s_%s_%d" % (meth, irfs, ds_ix)
self.coef = getattr(lme_r_results, "coef" + bname)
self.vcov_r = getattr(lme_r_results, "vcov" + bname)
self.cov_re_r = getattr(lme_r_results, "cov_re" + bname)
self.scale_r = getattr(lme_r_results, "scale" + bname)
self.loglike = getattr(lme_r_results, "loglike" + bname)
if hasattr(lme_r_results, "ranef_mean" + bname):
self.ranef_postmean = getattr(lme_r_results, "ranef_mean"
+ bname)
self.ranef_condvar = getattr(lme_r_results,
"ranef_condvar" + bname)
self.ranef_condvar = np.atleast_2d(self.ranef_condvar)
# Load the data file
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, "lme%02d.csv" % ds_ix)
fid = open(fname)
rdr = csv.reader(fid)
header = next(rdr)
data = [[float(x) for x in line] for line in rdr]
data = np.asarray(data)
# Split into exog, endog, etc.
self.endog = data[:,header.index("endog")]
self.groups = data[:,header.index("groups")]
ii = [i for i,x in enumerate(header) if
x.startswith("exog_fe")]
self.exog_fe = data[:,ii]
ii = [i for i,x in enumerate(header) if
x.startswith("exog_re")]
self.exog_re = data[:,ii]
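# Hedged usage sketch (mirrors do1 below): load the R reference fit for one of
# the bundled data sets, e.g. ML estimation with dependent random effects on
# data set 1:
#   rslt = R_Results("ml", "drf", 1)
#   rslt.coef, rslt.cov_re_r, rslt.scale_r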
def loglike_function(model, profile_fe, has_fe):
"""
Returns a function that evaluates the negative log-likelihood for
the given model.
"""
def f(x):
params = MixedLMParams.from_packed(x, model.k_fe, model.k_re, model.use_sqrt, has_fe=has_fe)
return -model.loglike(params, profile_fe=profile_fe)
return f
def score_function(model, profile_fe, has_fe):
"""
Returns a function that evaluates the negative score function for
the given model.
"""
def f(x):
params = MixedLMParams.from_packed(x, model.k_fe, model.k_re, model.use_sqrt, has_fe=has_fe)
return -model.score(params, profile_fe=profile_fe)
return f
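# Hedged usage sketch (mirrors test_compare_numdiff below): the factories above
# return plain callables of a packed parameter vector, so they can be fed
# directly to the numeric differentiation helpers, e.g.
#   f = loglike_function(model, profile_fe=False, has_fe=True)
#   ngrad = nd.approx_fprime(packed_params, f)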
class TestMixedLM(object):
# Test analytic scores and Hessian using numeric differentiation
@dec.slow
def test_compare_numdiff(self):
n_grp = 200
grpsize = 5
k_fe = 3
k_re = 2
for use_sqrt in False,True:
for reml in False,True:
for profile_fe in False,True:
np.random.seed(3558)
exog_fe = np.random.normal(size=(n_grp*grpsize, k_fe))
exog_re = np.random.normal(size=(n_grp*grpsize, k_re))
exog_re[:, 0] = 1
exog_vc = np.random.normal(size=(n_grp*grpsize, 3))
slopes = np.random.normal(size=(n_grp, k_re))
slopes[:, -1] *= 2
slopes = np.kron(slopes, np.ones((grpsize,1)))
slopes_vc = np.random.normal(size=(n_grp, 3))
slopes_vc = np.kron(slopes_vc, np.ones((grpsize,1)))
slopes_vc[:, -1] *= 2
re_values = (slopes * exog_re).sum(1)
vc_values = (slopes_vc * exog_vc).sum(1)
err = np.random.normal(size=n_grp*grpsize)
endog = exog_fe.sum(1) + re_values + vc_values + err
groups = np.kron(range(n_grp), np.ones(grpsize))
vc = {"a": {}, "b": {}}
for i in range(n_grp):
ix = np.flatnonzero(groups == i)
vc["a"][i] = exog_vc[ix, 0:2]
vc["b"][i] = exog_vc[ix, 2:3]
model = MixedLM(endog, exog_fe, groups, exog_re, exog_vc=vc, use_sqrt=use_sqrt)
rslt = model.fit(reml=reml)
loglike = loglike_function(model, profile_fe=profile_fe, has_fe=not profile_fe)
score = score_function(model, profile_fe=profile_fe, has_fe=not profile_fe)
# Test the score at several points.
for kr in range(5):
fe_params = np.random.normal(size=k_fe)
cov_re = np.random.normal(size=(k_re, k_re))
cov_re = np.dot(cov_re.T, cov_re)
vcomp = np.random.normal(size=2)**2
params = MixedLMParams.from_components(fe_params, cov_re=cov_re, vcomp=vcomp)
params_vec = params.get_packed(has_fe=not profile_fe, use_sqrt=use_sqrt)
# Check scores
gr = -model.score(params, profile_fe=profile_fe)
ngr = nd.approx_fprime(params_vec, loglike)
assert_allclose(gr, ngr, rtol=1e-3)
# Check Hessian matrices at the MLE (we don't have
# the profile Hessian matrix and we don't care
# about the Hessian for the square root
# transformed parameter).
if (profile_fe == False) and (use_sqrt == False):
hess = -model.hessian(rslt.params_object)
params_vec = rslt.params_object.get_packed(use_sqrt=False, has_fe=True)
loglike_h = loglike_function(model, profile_fe=False, has_fe=True)
nhess = nd.approx_hess(params_vec, loglike_h)
assert_allclose(hess, nhess, rtol=1e-3)
def test_default_re(self):
np.random.seed(3235)
exog = np.random.normal(size=(300,4))
groups = np.kron(np.arange(100), [1,1,1])
g_errors = np.kron(np.random.normal(size=100), [1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mdf1 = MixedLM(endog, exog, groups).fit()
mdf2 = MixedLM(endog, exog, groups, np.ones(300)).fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=8)
def test_history(self):
np.random.seed(3235)
exog = np.random.normal(size=(300,4))
groups = np.kron(np.arange(100), [1,1,1])
g_errors = np.kron(np.random.normal(size=100), [1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mod = MixedLM(endog, exog, groups)
rslt = mod.fit(full_output=True)
assert_equal(hasattr(rslt, "hist"), True)
def test_profile_inference(self):
# Smoke test
np.random.seed(9814)
k_fe = 2
gsize = 3
n_grp = 100
exog = np.random.normal(size=(n_grp * gsize, k_fe))
exog_re = np.ones((n_grp * gsize, 1))
groups = np.kron(np.arange(n_grp), np.ones(gsize))
vca = np.random.normal(size=n_grp * gsize)
vcb = np.random.normal(size=n_grp * gsize)
errors = 0
g_errors = np.kron(np.random.normal(size=100), np.ones(gsize))
errors += g_errors + exog_re[:, 0]
rc = np.random.normal(size=n_grp)
errors += np.kron(rc, np.ones(gsize)) * vca
rc = np.random.normal(size=n_grp)
errors += np.kron(rc, np.ones(gsize)) * vcb
errors += np.random.normal(size=n_grp * gsize)
endog = exog.sum(1) + errors
vc = {"a" : {}, "b" : {}}
for k in range(n_grp):
ii = np.flatnonzero(groups == k)
vc["a"][k] = vca[ii][:, None]
vc["b"][k] = vcb[ii][:, None]
rslt = MixedLM(endog, exog, groups=groups, exog_re=exog_re, exog_vc=vc).fit()
prof_re = rslt.profile_re(0, vtype='re', dist_low=1, num_low=3, dist_high=1,
num_high=3)
prof_vc = rslt.profile_re('b', vtype='vc', dist_low=0.5, num_low=3, dist_high=0.5,
num_high=3)
# Fails on old versions of scipy/numpy
def txest_vcomp_1(self):
"""
Fit the same model using constrained random effects and variance components.
"""
np.random.seed(4279)
exog = np.random.normal(size=(400, 1))
exog_re = np.random.normal(size=(400, 2))
groups = np.kron(np.arange(100), np.ones(4))
slopes = np.random.normal(size=(100, 2))
slopes[:, 1] *= 2
slopes = np.kron(slopes, np.ones((4, 1))) * exog_re
errors = slopes.sum(1) + np.random.normal(size=400)
endog = exog.sum(1) + errors
free = MixedLMParams(1, 2, 0)
free.fe_params = np.ones(1)
free.cov_re = np.eye(2)
free.vcomp = np.zeros(0)
model1 = MixedLM(endog, exog, groups, exog_re=exog_re)
result1 = model1.fit(free=free)
exog_vc = {"a": {}, "b": {}}
for k,group in enumerate(model1.group_labels):
ix = model1.row_indices[group]
exog_vc["a"][group] = exog_re[ix, 0:1]
exog_vc["b"][group] = exog_re[ix, 1:2]
model2 = MixedLM(endog, exog, groups, exog_vc=exog_vc)
result2 = model2.fit()
result2.summary()
assert_allclose(result1.fe_params, result2.fe_params, atol=1e-4)
assert_allclose(np.diag(result1.cov_re), result2.vcomp, atol=1e-2, rtol=1e-4)
assert_allclose(result1.bse[[0, 1, 3]], result2.bse, atol=1e-2, rtol=1e-2)
def test_vcomp_2(self):
"""
Simulated data comparison to R
"""
np.random.seed(6241)
n = 1600
exog = np.random.normal(size=(n, 2))
ex_vc = []
groups = np.kron(np.arange(n / 16), np.ones(16))
# Build up the random error vector
errors = 0
# The random effects
exog_re = np.random.normal(size=(n, 2))
slopes = np.random.normal(size=(n / 16, 2))
slopes = np.kron(slopes, np.ones((16, 1))) * exog_re
errors += slopes.sum(1)
# First variance component
subgroups1 = np.kron(np.arange(n / 4), np.ones(4))
errors += np.kron(2*np.random.normal(size=n/4), np.ones(4))
# Second variance component
subgroups2 = np.kron(np.arange(n / 2), np.ones(2))
errors += np.kron(2*np.random.normal(size=n/2), np.ones(2))
# iid errors
errors += np.random.normal(size=n)
endog = exog.sum(1) + errors
df = pd.DataFrame(index=range(n))
df["y"] = endog
df["groups"] = groups
df["x1"] = exog[:, 0]
df["x2"] = exog[:, 1]
df["z1"] = exog_re[:, 0]
df["z2"] = exog_re[:, 1]
df["v1"] = subgroups1
df["v2"] = subgroups2
# Equivalent model in R:
# df.to_csv("tst.csv")
# model = lmer(y ~ x1 + x2 + (0 + z1 + z2 | groups) + (1 | v1) + (1 | v2), df)
vcf = {"a": "0 + C(v1)", "b": "0 + C(v2)"}
model1 = MixedLM.from_formula("y ~ x1 + x2", groups=groups, re_formula="0+z1+z2",
vc_formula=vcf, data=df)
result1 = model1.fit()
# Compare to R
assert_allclose(result1.fe_params, [0.16527, 0.99911, 0.96217], rtol=1e-4)
assert_allclose(result1.cov_re, [[1.244, 0.146], [0.146 , 1.371]], rtol=1e-3)
assert_allclose(result1.vcomp, [4.024, 3.997], rtol=1e-3)
assert_allclose(result1.bse.iloc[0:3], [0.12610, 0.03938, 0.03848], rtol=1e-3)
def test_sparse(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, 'pastes.csv')
# Dense
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit()
# Sparse
from scipy import sparse
model2 = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
use_sparse=True, data=data)
result2 = model2.fit()
assert_allclose(result.params, result2.params)
assert_allclose(result.bse, result2.bse)
def test_pastes_vcomp(self):
"""
pastes data from lme4
Fit in R using formula:
strength ~ (1|batch) + (1|batch:cask)
"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, 'pastes.csv')
# REML
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit()
assert_allclose(result.fe_params.iloc[0], 60.0533, rtol=1e-3)
assert_allclose(result.bse.iloc[0], 0.6769, rtol=1e-3)
assert_allclose(result.cov_re.iloc[0, 0], 1.657, rtol=1e-3)
assert_allclose(result.scale, 0.678, rtol=1e-3)
assert_allclose(result.llf, -123.49, rtol=1e-1)
assert_equal(result.aic, np.nan) # don't provide aic/bic with REML
assert_equal(result.bic, np.nan)
resid = np.r_[0.17133538, -0.02866462, -1.08662875, 1.11337125, -0.12093607]
assert_allclose(result.resid[0:5], resid, rtol=1e-3)
fit = np.r_[62.62866, 62.62866, 61.18663, 61.18663, 62.82094]
assert_allclose(result.fittedvalues[0:5], fit, rtol=1e-4)
# ML
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit(reml=False)
assert_allclose(result.fe_params.iloc[0], 60.0533, rtol=1e-3)
assert_allclose(result.bse.iloc[0], 0.642, rtol=1e-3)
assert_allclose(result.cov_re.iloc[0, 0], 1.199, rtol=1e-3)
assert_allclose(result.scale, 0.67799, rtol=1e-3)
assert_allclose(result.llf, -123.997, rtol=1e-1)
assert_allclose(result.aic, 255.9944, rtol=1e-3)
assert_allclose(result.bic, 264.3718, rtol=1e-3)
def test_vcomp_formula(self):
np.random.seed(6241)
n = 800
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
ex_vc = []
groups = np.kron(np.arange(n/4), np.ones(4))
errors = 0
exog_re = np.random.normal(size=(n, 2))
slopes = np.random.normal(size=(n/4, 2))
slopes = np.kron(slopes, np.ones((4, 1))) * exog_re
errors += slopes.sum(1)
ex_vc = np.random.normal(size=(n, 4))
slopes = np.random.normal(size=(n/4, 4))
slopes[:, 2:] *= 2
slopes = np.kron(slopes, np.ones((4, 1))) * ex_vc
errors += slopes.sum(1)
errors += np.random.normal(size=n)
endog = exog.sum(1) + errors
exog_vc = {"a": {}, "b": {}}
for k,group in enumerate(range(int(n/4))):
ix = np.flatnonzero(groups == group)
exog_vc["a"][group] = ex_vc[ix, 0:2]
exog_vc["b"][group] = ex_vc[ix, 2:]
model1 = MixedLM(endog, exog, groups, exog_re=exog_re, exog_vc=exog_vc)
result1 = model1.fit()
df = pd.DataFrame(exog[:, 1:], columns=["x1",])
df["y"] = endog
df["re1"] = exog_re[:, 0]
df["re2"] = exog_re[:, 1]
df["vc1"] = ex_vc[:, 0]
df["vc2"] = ex_vc[:, 1]
df["vc3"] = ex_vc[:, 2]
df["vc4"] = ex_vc[:, 3]
vc_formula = {"a": "0 + vc1 + vc2", "b": "0 + vc3 + vc4"}
model2 = MixedLM.from_formula("y ~ x1", groups=groups, re_formula="0 + re1 + re2",
vc_formula=vc_formula, data=df)
result2 = model2.fit()
assert_allclose(result1.fe_params, result2.fe_params, rtol=1e-8)
assert_allclose(result1.cov_re, result2.cov_re, rtol=1e-8)
assert_allclose(result1.vcomp, result2.vcomp, rtol=1e-8)
assert_allclose(result1.params, result2.params, rtol=1e-8)
assert_allclose(result1.bse, result2.bse, rtol=1e-8)
def test_formulas(self):
np.random.seed(2410)
exog = np.random.normal(size=(300,4))
exog_re = np.random.normal(size=300)
groups = np.kron(np.arange(100), [1,1,1])
g_errors = exog_re * np.kron(np.random.normal(size=100),
[1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mod1 = MixedLM(endog, exog, groups, exog_re)
# test the names
assert_(mod1.data.xnames == ["x1", "x2", "x3", "x4"])
assert_(mod1.data.exog_re_names == ["Z1"])
assert_(mod1.data.exog_re_names_full == ["Z1 RE"])
rslt1 = mod1.fit()
# Fit with a formula, passing groups as the actual values.
df = pd.DataFrame({"endog": endog})
for k in range(exog.shape[1]):
df["exog%d" % k] = exog[:,k]
df["exog_re"] = exog_re
fml = "endog ~ 0 + exog0 + exog1 + exog2 + exog3"
re_fml = "0 + exog_re"
mod2 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups=groups)
assert_(mod2.data.xnames == ["exog0", "exog1", "exog2", "exog3"])
assert_(mod2.data.exog_re_names == ["exog_re"])
assert_(mod2.data.exog_re_names_full == ["exog_re RE"])
rslt2 = mod2.fit()
assert_almost_equal(rslt1.params, rslt2.params)
# Fit with a formula, passing groups as the variable name.
df["groups"] = groups
mod3 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups="groups")
assert_(mod3.data.xnames == ["exog0", "exog1", "exog2", "exog3"])
assert_(mod3.data.exog_re_names == ["exog_re"])
assert_(mod3.data.exog_re_names_full == ["exog_re RE"])
rslt3 = mod3.fit(start_params=rslt2.params)
assert_allclose(rslt1.params, rslt3.params, rtol=1e-4)
# Check default variance structure with non-formula model
# creation.
exog_re = np.ones(len(endog), dtype=np.float64)
mod4 = MixedLM(endog, exog, groups, exog_re)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rslt4 = mod4.fit(start_params=rslt2.params)
from statsmodels.formula.api import mixedlm
mod5 = mixedlm(fml, df, groups="groups")
assert_(mod5.data.exog_re_names == ["groups"])
assert_(mod5.data.exog_re_names_full == ["groups RE"])
rslt5 = mod5.fit(start_params=rslt2.params)
assert_almost_equal(rslt4.params, rslt5.params)
def test_regularized(self):
np.random.seed(3453)
exog = np.random.normal(size=(400,5))
groups = np.kron(np.arange(100), np.ones(4))
expected_endog = exog[:,0] - exog[:,2]
endog = expected_endog +\
np.kron(np.random.normal(size=100), np.ones(4)) +\
np.random.normal(size=400)
# L1 regularization
md = MixedLM(endog, exog, groups)
mdf1 = md.fit_regularized(alpha=1.)
mdf1.summary()
# L1 regularization
md = MixedLM(endog, exog, groups)
mdf2 = md.fit_regularized(alpha=10*np.ones(5))
mdf2.summary()
# L2 regularization
pen = penalties.L2()
mdf3 = md.fit_regularized(method=pen, alpha=0.)
mdf3.summary()
# L2 regularization
pen = penalties.L2()
mdf4 = md.fit_regularized(method=pen, alpha=100.)
mdf4.summary()
# Pseudo-Huber regularization
pen = penalties.PseudoHuber(0.3)
mdf4 = md.fit_regularized(method=pen, alpha=1.)
mdf4.summary()
def do1(self, reml, irf, ds_ix):
# No need to check independent random effects when there is
# only one of them.
if irf and ds_ix < 6:
return
irfs = "irf" if irf else "drf"
meth = "reml" if reml else "ml"
rslt = R_Results(meth, irfs, ds_ix)
# Fit the model
md = MixedLM(rslt.endog, rslt.exog_fe, rslt.groups,
rslt.exog_re)
if not irf: # Free random effects covariance
mdf = md.fit(gtol=1e-7, reml=reml)
else: # Independent random effects
k_fe = rslt.exog_fe.shape[1]
k_re = rslt.exog_re.shape[1]
free = MixedLMParams(k_fe, k_re, 0)
free.fe_params = np.ones(k_fe)
free.cov_re = np.eye(k_re)
free.vcomp = np.array([])
mdf = md.fit(reml=reml, gtol=1e-7, free=free)
assert_almost_equal(mdf.fe_params, rslt.coef, decimal=4)
assert_almost_equal(mdf.cov_re, rslt.cov_re_r, decimal=4)
assert_almost_equal(mdf.scale, rslt.scale_r, decimal=4)
k_fe = md.k_fe
assert_almost_equal(rslt.vcov_r, mdf.cov_params()[0:k_fe,0:k_fe],
decimal=3)
assert_almost_equal(mdf.llf, rslt.loglike[0], decimal=2)
# Not supported in R except for independent random effects
if not irf:
assert_almost_equal(mdf.random_effects[0], rslt.ranef_postmean,
decimal=3)
assert_almost_equal(mdf.random_effects_cov[0],
rslt.ranef_condvar,
decimal=3)
# Run all the tests against R
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("lme")
and x.endswith(".csv")]
for fname in fnames:
for reml in False,True:
for irf in False,True:
ds_ix = int(fname[3:5])
yield self.do1, reml, irf, ds_ix
def test_mixed_lm_wrapper():
# a bit more complicated model to test
np.random.seed(2410)
exog = np.random.normal(size=(300, 4))
exog_re = np.random.normal(size=300)
groups = np.kron(np.arange(100), [1, 1, 1])
g_errors = exog_re * np.kron(np.random.normal(size=100),
[1, 1, 1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
# Fit with a formula, passing groups as the actual values.
df = pd.DataFrame({"endog": endog})
for k in range(exog.shape[1]):
df["exog%d" % k] = exog[:, k]
df["exog_re"] = exog_re
fml = "endog ~ 0 + exog0 + exog1 + exog2 + exog3"
re_fml = "~ exog_re"
mod2 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups=groups)
result = mod2.fit()
smoke = result.summary()
xnames = ["exog0", "exog1", "exog2", "exog3"]
re_names = ["Intercept", "exog_re"]
re_names_full = ["Intercept RE", "Intercept RE x exog_re RE",
"exog_re RE"]
assert_(mod2.data.xnames == xnames)
assert_(mod2.data.exog_re_names == re_names)
assert_(mod2.data.exog_re_names_full == re_names_full)
params = result.params
assert_(params.index.tolist() == xnames + re_names_full)
bse = result.bse
assert_(bse.index.tolist() == xnames + re_names_full)
tvalues = result.tvalues
assert_(tvalues.index.tolist() == xnames + re_names_full)
cov_params = result.cov_params()
assert_(cov_params.index.tolist() == xnames + re_names_full)
assert_(cov_params.columns.tolist() == xnames + re_names_full)
fe = result.fe_params
assert_(fe.index.tolist() == xnames)
bse_fe = result.bse_fe
assert_(bse_fe.index.tolist() == xnames)
cov_re = result.cov_re
assert_(cov_re.index.tolist() == re_names)
assert_(cov_re.columns.tolist() == re_names)
cov_re_u = result.cov_re_unscaled
assert_(cov_re_u.index.tolist() == re_names)
assert_(cov_re_u.columns.tolist() == re_names)
bse_re = result.bse_re
assert_(bse_re.index.tolist() == re_names_full)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |