repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
jblackburne/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    # The unpacking trick below assumes each sample index fits in a
    # big-endian uint32, so hypercubes wider than 30 bits are built in
    # 30-bit pieces that are concatenated column-wise.
    if dimensions > 30:
        return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
                          _generate_hypercube(samples, 30, rng)])
    # Draw `samples` distinct integers in [0, 2 ** dimensions); each
    # integer encodes one vertex of the hypercube.
    out = astype(sample_without_replacement(2 ** dimensions, samples,
                                            random_state=rng),
                 dtype='>u4', copy=False)
    # Expand each big-endian uint32 into its 32 bits and keep only the
    # `dimensions` least significant ones as the binary coordinates.
    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
    return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.

    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1. The input sequence is never modified.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        # Infer the last class weight; build a fresh list so the caller's
        # sequence is never mutated (this also accepts tuples).
        weights = list(weights) + [1.0 - sum(weights)]

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # Hand out any rounding leftovers one sample at a time
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    # `int` maps to the same platform default integer dtype as the removed
    # deprecated alias `np.int` did.
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels. NOTE: the `>= 0.0` comparison means the
    # generator is consumed even when flip_y == 0; this is kept on purpose
    # so seeded runs stay reproducible across versions.
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix

        .. versionadded:: 0.17
           parameter to allow *sparse* output.

    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.

    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.

    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.

    """
    generator = check_random_state(random_state)
    # Class prior P(c): random positive weights normalized to sum to one.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    # CDF of the class prior, used for inverse-transform sampling below.
    cumulative_p_c = np.cumsum(p_c)
    # Word distributions P(w|c): one normalized column per class.
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # Draw a single (words, labels) pair following the generative
        # process described in the docstring.
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y

    # Accumulate the documents directly as CSR components so the sparse
    # bag-of-words matrix can be built in one shot.
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    # Repeated draws of the same word become counts > 1 via sum_duplicates.
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c

    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rs = check_random_state(random_state)

    # rs.normal(size=...) already returns an array of the requested shape;
    # the previous trailing .reshape(shape) was a no-op and has been removed.
    X = rs.normal(size=(n_samples, 10))
    # Threshold 9.34 is the median of a chi-squared(10) variable (Hastie et
    # al., Example 10.2), so the classes are approximately balanced.
    y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
    y[y == 0.0] = -1.0

    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The design matrix is either well conditioned (the default) or low rank
    with a fat singular-value tail (see :func:`make_low_rank_matrix`).
    Targets come from a random linear model in which only `n_informative`
    coefficients are non-zero, with optional centered gaussian noise added.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of features actually used to build the linear model that
        generates the output (capped at `n_features`).

    n_targets : int, optional (default=1)
        The dimension of the y output vector associated with each sample.
        By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        If not None, the approximate number of singular vectors required to
        explain most of the input data by linear combinations; this
        reproduces the correlations often observed in practice. If None,
        the input set is well conditioned, centered and gaussian with unit
        variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are
        returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only
        if coef is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Well conditioned: standard normal design matrix.
        X = generator.randn(n_samples, n_features)
    else:
        # Low rank design with a bell-shaped singular-value profile.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Sparse ground-truth model: only the first n_informative rows carry
    # signal, so the remaining features are uncorrelated with y and should
    # be discarded by sparsifying regularizers such as L1 or elastic net.
    coefficients = np.zeros((n_features, n_targets))
    coefficients[:n_informative, :] = 100 * generator.rand(n_informative,
                                                           n_targets)

    y = np.dot(X, coefficients) + bias

    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    if shuffle:
        # Permute the rows first, then the columns together with the
        # matching coefficient rows.
        X, y = util_shuffle(X, y, random_state=generator)

        column_order = np.arange(n_features)
        generator.shuffle(column_order)
        X[:, :] = X[:, column_order]
        coefficients = coefficients[column_order]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(coefficients)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)

    n_half = n_samples // 2
    # Sample one extra angle and drop it, so the first and last points of
    # the circle never coincide.
    angles = np.linspace(0, 2 * np.pi, n_half + 1)[:-1]

    outer_ring = np.column_stack((np.cos(angles), np.sin(angles)))
    inner_ring = outer_ring * factor

    X = np.vstack((outer_ring, inner_ring))
    y = np.hstack([np.zeros(n_half, dtype=np.intp),
                   np.ones(n_half, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # The n_samples_out outer points are stacked first, so label 0 must
    # have n_samples_out entries and label 1 n_samples_in entries. (The two
    # counts were previously swapped, which misaligned labels and points
    # whenever n_samples was odd.)
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly in the bounding box.
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    # Broadcast a scalar standard deviation to one value per center.
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.ones(len(centers)) * cluster_std

    n_centers = centers.shape[0]
    # Split n_samples as evenly as possible; the first `remainder` centers
    # receive one extra point each.
    base_count, remainder = divmod(n_samples, n_centers)
    counts = [base_count + (1 if i < remainder else 0)
              for i in range(n_centers)]

    sample_chunks = []
    labels = []
    for center_idx, (count, std) in enumerate(zip(counts, cluster_std)):
        sample_chunks.append(centers[center_idx] +
                             generator.normal(scale=std,
                                              size=(count, n_features)))
        labels += [center_idx] * count

    X = np.concatenate(sample_chunks)
    y = np.array(labels)

    if shuffle:
        permutation = np.arange(n_samples)
        generator.shuffle(permutation)
        X = X[permutation]
        y = y[permutation]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    This dataset is described in Friedman [1] and Breiman [2]. Inputs `X`
    are independent features uniformly distributed on the interval [0, 1],
    and the output `y` follows::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first 5 of the `n_features` features are actually used to
    compute `y`; the remaining features are independent of it. The number
    of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features. Should be at least 5.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)

    # Only the first five columns enter the target; any extra columns are
    # pure nuisance features.
    y = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
         + 20 * (X[:, 2] - 0.5) ** 2
         + 10 * X[:, 3]
         + 5 * X[:, 4]
         + noise * rng.randn(n_samples))

    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    This dataset is described in Friedman [1] and Breiman [2]. Inputs `X`
    are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2]
               - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)

    # Rescale each column from [0, 1] onto its documented interval.
    X[:, 0] *= 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    cross_term = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = ((X[:, 0] ** 2 + cross_term ** 2) ** 0.5
         + noise * rng.randn(n_samples))

    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    This dataset is described in Friedman [1] and Breiman [2]. Inputs `X`
    are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3]))
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)

    # Rescale each column from [0, 1] onto its documented interval.
    X[:, 0] *= 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(ratio) + noise * rng.randn(n_samples)

    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:
     - gray level pictures of faces
     - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random (ortho normal) vectors
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = low_rank + tail

    # Scaling the columns of u by s is numerically equivalent to
    # u @ diag(s) but avoids materializing the n x n diagonal matrix and
    # the associated O(n^3) matrix product.
    return np.dot(u * s, v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    n_components : int
        number of components in the dictionary

    n_features : int
        number of features of the dataset to generate

    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample

    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).

    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).

    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).

    Raises
    ------
    ValueError
        If ``n_nonzero_coefs`` exceeds ``n_components`` (each column of X
        cannot have more active entries than there are components).
    """
    if n_nonzero_coefs > n_components:
        raise ValueError("n_nonzero_coefs cannot be larger than n_components")
    generator = check_random_state(random_state)

    # generate dictionary with unit-norm columns
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # generate code: each column has exactly n_nonzero_coefs active entries
    # at uniformly random positions
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    # Return concrete arrays instead of a lazy `map` object: under
    # Python 3 (`map = six.moves.map` above) a map is a one-shot iterator
    # and does not match the documented array return types.
    return tuple(np.squeeze(a) for a in (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four features carry signal; the target is their fixed
    # linear combination plus unit-variance Gaussian noise.
    target_mean = (X[:, 0] +
                   2 * X[:, 1] -
                   2 * X[:, 2] -
                   1.5 * X[:, 3])
    y = rng.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)
    rand_mat = rng.rand(n_dim, n_dim)
    # A.T A is symmetric positive semi-definite; rebuild it from its SVD
    # with the middle factor shifted away from zero to guarantee strictly
    # positive definiteness.
    U, _, V = linalg.svd(np.dot(rand_mat.T, rand_mat))
    boost = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, boost), V)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1.

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)

    # Build a sparse strictly-lower-triangular factor whose non-zero
    # entries lie in [smallest_coef, largest_coef].
    chol = -np.eye(dim)
    factor = random_state.rand(dim, dim)
    factor[factor < alpha] = 0
    keep = factor > alpha
    factor[keep] = (smallest_coef
                    + (largest_coef - smallest_coef)
                    * random_state.rand(np.sum(keep)))
    factor = np.tril(factor, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    perm = random_state.permutation(dim)
    factor = factor[perm].T[perm]
    chol += factor
    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Scale rows and columns by 1/sqrt(diag) so every diagonal
        # element of the result is exactly 1.
        scale = 1. / np.sqrt(np.diag(prec).reshape(1, prec.shape[0]))
        prec *= scale
        prec *= scale.T
    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)
    # Univariate position along the roll.
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    X = np.concatenate((t * np.cos(t),
                        21 * rng.rand(1, n_samples),
                        t * np.sin(t)))
    # The randn draw happens unconditionally so a caller-supplied
    # RandomState advances identically whether or not noise is zero.
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)
    # Curve parameter in [-1.5*pi, 1.5*pi).
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    X = np.concatenate((np.sin(t),
                        2.0 * rng.rand(1, n_samples),
                        np.sign(t) * (np.cos(t) - 1)))
    # Unconditional draw keeps a caller-supplied RandomState in the same
    # state regardless of the noise level.
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    """Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.

    n_features : int, optional (default=2)
        The number of features for each sample.

    n_classes : int, optional (default=3)
        The number of classes.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)
    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw an isotropic Gaussian cloud centered on ``mean``.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order the samples by squared distance from the mean so quantile
    # (shell) membership can be assigned by position.
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # n_samples // n_classes points per class; any remainder is folded
    # into the outermost class.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    # Shuffle the rows and columns of ``data`` independently and return
    # the shuffled array together with the permutations that were applied.
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    rows = rng.permutation(n_rows)
    cols = rng.permutation(n_cols)
    return data[rows][:, cols], rows, cols
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer
        The number of biclusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), col_sizes)))

    # Each bicluster adds its own constant on the block where row and
    # column labels coincide.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Build the indicator matrices from list arguments: calling np.vstack
    # on a bare generator is deprecated in NumPy and raises in recent
    # releases.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))

    # Every (row cluster, column cluster) pair gets its own constant.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Build the indicator matrices from list arguments: calling np.vstack
    # on a bare generator is deprecated in NumPy and raises in recent
    # releases.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 4 | 37254 | import sys
from io import StringIO
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sp
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from sklearn.exceptions import EfficiencyWarning
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold._t_sne import _joint_probabilities
from sklearn.manifold._t_sne import _joint_probabilities_nn
from sklearn.manifold._t_sne import _kl_divergence
from sklearn.manifold._t_sne import _kl_divergence_bh
from sklearn.manifold._t_sne import _gradient_descent
from sklearn.manifold._t_sne import trustworthiness
from sklearn.manifold import TSNE
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import _barnes_hut_tsne # type: ignore
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import cosine_distances
# Regular 10x10 grid over the unit square, flattened into (100, 2)
# sample points shared by several tests below.
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.column_stack([xx.ravel(), yy.ravel()])
def test_gradient_descent_stops():
    # Check the three stopping conditions of _gradient_descent: small
    # gradient norm, lack of progress, and the iteration budget.
    class ObjectiveSmallGradient:
        def __init__(self):
            self.it = -1

        def __call__(self, _, compute_error=True):
            self.it += 1
            return (10 - self.it) / 10.0, np.array([1e-5])

    def flat_function(_, compute_error=True):
        return 0.0, np.ones(1)

    def run_captured(objective, n_iter, n_iter_without_progress,
                     min_grad_norm):
        # Run one descent with stdout captured so the verbose trace can
        # be inspected afterwards; always restore the real stdout.
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            _, error, it = _gradient_descent(
                objective, np.zeros(1), 0, n_iter=n_iter,
                n_iter_without_progress=n_iter_without_progress,
                momentum=0.0, learning_rate=0.0, min_gain=0.0,
                min_grad_norm=min_grad_norm, verbose=2)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = saved_stdout
        return error, it, out

    # Gradient norm
    error, it, out = run_captured(ObjectiveSmallGradient(), 100, 100, 1e-5)
    assert error == 1.0
    assert it == 0
    assert("gradient norm" in out)

    # Maximum number of iterations without improvement
    error, it, out = run_captured(flat_function, 100, 10, 0.0)
    assert error == 0.0
    assert it == 11
    assert("did not make any progress" in out)

    # Maximum number of iterations
    error, it, out = run_captured(ObjectiveSmallGradient(), 11, 100, 0.0)
    assert error == 0.0
    assert it == 10
    assert("Iteration 10" in out)
def test_binary_search():
    # The binary search should find per-point Gaussians whose mean
    # perplexity matches the requested value.
    rng = check_random_state(0)
    sample = rng.randn(50, 5)
    dist = pairwise_distances(sample).astype(np.float32)
    target_perplexity = 25.0
    P = _binary_search_perplexity(dist, target_perplexity, verbose=0)
    P = np.maximum(P, np.finfo(np.double).eps)
    per_point = [np.exp(-np.sum(P[i] * np.log(P[i])))
                 for i in range(P.shape[0])]
    assert_almost_equal(np.mean(per_point), target_perplexity, decimal=3)
def test_binary_search_neighbors():
    # Binary perplexity search approximation.
    # Using all points as neighbors should match the dense computation;
    # with fewer neighbors the largest P_ij should still agree.
    n_samples = 200
    desired_perplexity = 25.0
    rng = check_random_state(0)
    data = rng.randn(n_samples, 2).astype(np.float32, copy=False)
    P1 = _binary_search_perplexity(pairwise_distances(data),
                                   desired_perplexity, verbose=0)

    # With every other point as a neighbor the results must be identical.
    n_neighbors = n_samples - 1
    nn = NearestNeighbors().fit(data)
    distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
                                         mode='distance')
    distances_nn = distance_graph.data.astype(np.float32, copy=False)
    distances_nn = distances_nn.reshape(n_samples, n_neighbors)
    P2 = _binary_search_perplexity(distances_nn, desired_perplexity,
                                   verbose=0)

    # Gather the dense entries corresponding to the neighbor graph.
    indptr = distance_graph.indptr
    P1_nn = np.array([P1[k, distance_graph.indices[indptr[k]:indptr[k + 1]]]
                      for k in range(n_samples)])
    assert_array_almost_equal(P1_nn, P2, decimal=4)

    # The highest P_ij should stay the same when fewer neighbors are used.
    for k in np.linspace(150, n_samples - 1, 5):
        k = int(k)
        topn = k * 10  # check the top 10 * k entries out of k * k entries
        distance_graph = nn.kneighbors_graph(n_neighbors=k, mode='distance')
        distances_nn = distance_graph.data.astype(np.float32, copy=False)
        distances_nn = distances_nn.reshape(n_samples, k)
        P2k = _binary_search_perplexity(distances_nn, desired_perplexity,
                                        verbose=0)
        assert_array_almost_equal(P1_nn, P2, decimal=2)
        P1top = np.sort(P1.ravel())[::-1][:topn]
        P2top = np.sort(P2k.ravel())[::-1][:topn]
        assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
    # Repeated binary perplexity searches on the same input must give the
    # same result (a past bug left the P array uninitialized, which made
    # the output sporadically differ between runs).
    n_neighbors = 10
    n_samples = 100
    rng = check_random_state(0)
    points = rng.randn(n_samples, 5)
    nn = NearestNeighbors().fit(points)
    distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
                                         mode='distance')
    distances = distance_graph.data.astype(np.float32, copy=False)
    distances = distances.reshape(n_samples, n_neighbors)
    desired_perplexity = 3
    previous_P = None
    previous_P1 = None
    for _ in range(100):
        P = _binary_search_perplexity(distances.copy(), desired_perplexity,
                                      verbose=0)
        P1 = _joint_probabilities_nn(distance_graph, desired_perplexity,
                                     verbose=0)
        # Densify the sparse joint-probability matrix for comparison
        P1 = P1.toarray()
        if previous_P is None:
            previous_P = P
            previous_P1 = P1
        else:
            assert_array_almost_equal(P, previous_P, decimal=4)
            assert_array_almost_equal(P1, previous_P1, decimal=4)
def test_gradient():
    # The analytic gradient of the KL divergence should agree with a
    # finite-difference approximation (scipy's check_grad).
    rng = check_random_state(0)
    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = rng.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = rng.randn(n_samples, n_components).astype(np.float32)
    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def kl(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def kl_grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(kl, kl_grad, X_embedded.ravel()), 0.0,
                        decimal=5)
def test_trustworthiness():
    # Trustworthiness on three canonical cases.
    rng = check_random_state(0)

    # An affine transformation preserves every neighborhood
    X = rng.randn(100, 2)
    assert trustworthiness(X, 5.0 + X / 10.0) == 1.0

    # A random permutation destroys most neighborhoods
    X = np.arange(100).reshape(-1, 1)
    X_embedded = X.copy()
    rng.shuffle(X_embedded)
    assert trustworthiness(X, X_embedded) < 0.6

    # A hand-crafted, completely different embedding
    X = np.arange(5).reshape(-1, 1)
    X_embedded = np.array([[0], [2], [4], [1], [3]])
    assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
@pytest.mark.parametrize("method", ['exact', 'barnes_hut'])
@pytest.mark.parametrize("init", ('random', 'pca'))
def test_preserve_trustworthiness_approximately(method, init):
    # Nearest neighbors should be preserved approximately.
    n_components = 2
    X = check_random_state(0).randn(50, n_components).astype(np.float32)
    embedding = TSNE(n_components=n_components, init=init, random_state=0,
                     method=method, n_iter=700).fit_transform(X)
    assert trustworthiness(X, embedding, n_neighbors=1) > 0.85
def test_optimization_minimizes_kl_divergence():
    """t-SNE should give a lower KL divergence with more iterations."""
    X, _ = make_blobs(n_features=3, random_state=check_random_state(0))
    kl_divergences = []
    for n_iter in [250, 300, 350]:
        model = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                     n_iter=n_iter, random_state=0)
        model.fit_transform(X)
        kl_divergences.append(model.kl_divergence_)
    # Longer optimization must not increase the objective.
    assert kl_divergences[1] <= kl_divergences[0]
    assert kl_divergences[2] <= kl_divergences[1]
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_fit_csr_matrix(method):
    # Sparse CSR input must be accepted and embedded sensibly.
    rng = check_random_state(0)
    X = rng.randn(50, 2)
    # Zero out some entries so the CSR representation is actually sparse.
    X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0
    X_csr = sp.csr_matrix(X)
    embedding = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                     random_state=0, method=method,
                     n_iter=750).fit_transform(X_csr)
    assert_allclose(trustworthiness(X_csr, embedding, n_neighbors=1),
                    1.0, rtol=1.1e-1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    # Nearest neighbors should be preserved approximately.
    rng = check_random_state(0)
    for seed in range(3):
        X = rng.randn(80, 2)
        D = squareform(pdist(X), "sqeuclidean")
        embedding = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                         early_exaggeration=2.0, metric="precomputed",
                         random_state=seed, verbose=0, n_iter=500,
                         square_distances=True).fit_transform(D)
        assert trustworthiness(D, embedding, n_neighbors=1,
                               metric="precomputed") > .95
def test_trustworthiness_not_euclidean_metric():
    # Trustworthiness computed with metric='cosine' must equal the value
    # obtained on a precomputed cosine distance matrix.
    X = check_random_state(0).randn(100, 2)
    D = pairwise_distances(X, metric='cosine')
    assert (trustworthiness(X, X, metric='cosine') ==
            trustworthiness(D, X, metric='precomputed'))
def test_early_exaggeration_too_small():
    # Early exaggeration factors below 1 must be rejected.
    model = TSNE(early_exaggeration=0.99)
    data = np.array([[0.0], [0.0]])
    with pytest.raises(ValueError, match="early_exaggeration .*"):
        model.fit_transform(data)
def test_too_few_iterations():
    # At least 200 gradient descent iterations are required.
    model = TSNE(n_iter=199)
    data = np.array([[0.0], [0.0]])
    with pytest.raises(ValueError, match="n_iter .*"):
        model.fit_transform(data)
@pytest.mark.parametrize('method, retype', [
    ('exact', np.asarray),
    ('barnes_hut', np.asarray),
    ('barnes_hut', sp.csr_matrix),
])
@pytest.mark.parametrize('D, message_regex', [
    ([[0.0], [1.0]], ".* square distance matrix"),
    ([[0., -1.], [1., 0.]], ".* positive.*"),
])
def test_bad_precomputed_distances(method, D, retype, message_regex):
    # Non-square or negative precomputed distance matrices are rejected
    # for both methods and both dense/sparse containers.
    model = TSNE(metric="precomputed", method=method,
                 square_distances=True)
    with pytest.raises(ValueError, match=message_regex):
        model.fit_transform(retype(D))
def test_exact_no_precomputed_sparse():
    # The exact method does not accept sparse precomputed distances.
    model = TSNE(metric='precomputed', method='exact', square_distances=True)
    sparse_D = sp.csr_matrix([[0, 5], [5, 0]])
    with pytest.raises(TypeError, match='sparse'):
        model.fit_transform(sparse_D)
def test_high_perplexity_precomputed_sparse_distances():
    # Perplexity should be less than 50
    raw = np.array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.]])
    sparse_dist = sp.csr_matrix(raw)
    model = TSNE(metric="precomputed", square_distances=True)
    msg = "3 neighbors per samples are required, but some samples have only 1"
    with pytest.raises(ValueError, match=msg):
        model.fit_transform(sparse_dist)
@ignore_warnings(category=EfficiencyWarning)
def test_sparse_precomputed_distance():
    """Make sure that TSNE works identically for sparse and dense matrix"""
    X = check_random_state(0).randn(100, 2)
    # A complete kneighbors graph (including self) is a sparse version of
    # the full pairwise distance matrix.
    D_sparse = kneighbors_graph(X, n_neighbors=100, mode='distance',
                                include_self=True)
    D = pairwise_distances(X)
    assert sp.issparse(D_sparse)
    assert_almost_equal(D_sparse.A, D)

    tsne = TSNE(metric="precomputed", random_state=0, square_distances=True)
    Xt_dense = tsne.fit_transform(D)

    for fmt in ['csr', 'lil']:
        assert_almost_equal(Xt_dense,
                            tsne.fit_transform(D_sparse.asformat(fmt)))
def test_non_positive_computed_distances():
    # A custom metric returning negative distances must be caught, even
    # though the distances would be squared afterwards.
    def negative_metric(x, y):
        return -1

    model = TSNE(metric=negative_metric, method='exact',
                 square_distances=True)
    points = np.array([[0.0, 0.0], [1.0, 1.0]])
    with pytest.raises(ValueError, match="All distances .*metric given.*"):
        model.fit_transform(points)
def test_init_not_available():
    # 'init' must be 'pca', 'random', or numpy array.
    m = "'init' must be 'pca', 'random', or a numpy array"
    with pytest.raises(ValueError, match=m):
        TSNE(init="not available").fit_transform(np.array([[0.0], [1.0]]))
def test_init_ndarray():
    # A user-supplied ndarray init is used as the starting embedding.
    embedding = TSNE(init=np.zeros((100, 2))).fit_transform(
        np.ones((100, 5)))
    assert_array_equal(np.zeros((100, 2)), embedding)
def test_init_ndarray_precomputed():
    # ndarray init combined with metric 'precomputed' must fit without
    # emitting a FutureWarning from _fit.
    model = TSNE(init=np.zeros((100, 2)), metric="precomputed",
                 square_distances=True)
    model.fit(np.zeros((100, 100)))
def test_distance_not_available():
    # 'metric' must be valid; both methods raise, with different messages.
    data = np.array([[0.0], [1.0]])

    with pytest.raises(ValueError, match="Unknown metric not available.*"):
        TSNE(metric="not available", method='exact',
             square_distances=True).fit_transform(data)

    with pytest.raises(ValueError,
                       match="Metric 'not available' not valid.*"):
        TSNE(metric="not available", method='barnes_hut',
             square_distances=True).fit_transform(data)
def test_method_not_available():
    # 'method' must be 'barnes_hut' or 'exact'
    model = TSNE(method='not available')
    with pytest.raises(ValueError, match="'method' must be 'barnes_hut' or "):
        model.fit_transform(np.array([[0.0], [1.0]]))
def test_square_distances_not_available():
    # square_distances must be True or 'legacy'.
    model = TSNE(square_distances="not_available")
    with pytest.raises(ValueError,
                       match="'square_distances' must be True or"):
        model.fit_transform(np.array([[0.0], [1.0]]))
def test_angle_out_of_range_checks():
    # Angles outside [0, 1] must be rejected.
    data = np.array([[0.0], [1.0]])
    for bad_angle in [-1, -1e-6, 1 + 1e-6, 2]:
        with pytest.raises(ValueError, match="'angle' must be between "
                                             "0.0 - 1.0"):
            TSNE(angle=bad_angle).fit_transform(data)
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    # init="pca" cannot be combined with a precomputed distance matrix.
    model = TSNE(metric="precomputed", init="pca", square_distances=True)
    expected = ("The parameter init=\"pca\" cannot be used with"
                " metric=\"precomputed\".")
    with pytest.raises(ValueError, match=expected):
        model.fit_transform(np.array([[0.0], [1.0]]))
def test_n_components_range():
    # barnes_hut method should only be used with n_components <= 3
    model = TSNE(n_components=4, method="barnes_hut")
    with pytest.raises(ValueError, match="'n_components' should be .*"):
        model.fit_transform(np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
    # check that the ``early_exaggeration`` parameter has an effect
    n_components = 2
    X = check_random_state(0).randn(25, n_components).astype(np.float32)
    for method in ['exact', 'barnes_hut']:
        embeddings = []
        # Two runs differing only in the exaggeration factor should not
        # land on the same embedding.
        for exaggeration in (1.0, 10.0):
            tsne = TSNE(n_components=n_components, perplexity=1,
                        learning_rate=100.0, init="pca", random_state=0,
                        method=method, early_exaggeration=exaggeration,
                        n_iter=250)
            embeddings.append(tsne.fit_transform(X))
        assert not np.allclose(embeddings[0], embeddings[1])
def test_n_iter_used():
    # The optimizer must stop after exactly ``n_iter`` iterations.
    rng = check_random_state(0)
    n_components = 2
    X = rng.randn(25, n_components).astype(np.float32)
    for method in ('exact', 'barnes_hut'):
        for n_iter in (251, 500):
            tsne = TSNE(n_components=n_components, perplexity=1,
                        learning_rate=0.5, init="random", random_state=0,
                        method=method, early_exaggeration=1.0,
                        n_iter=n_iter)
            tsne.fit_transform(X)
            # n_iter_ reports the 0-based index of the last iteration.
            assert tsne.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
    # Test the tree with only a single set of children.
    #
    # These tests & answers have been checked against the reference
    # implementation by LvdM.
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
    pos_output = np.array([[-4.961291e-05, -1.072243e-04],
                           [9.259460e-05, 2.702024e-04]])
    neighbors = np.array([[1],
                          [0]])
    # Expected Barnes-Hut gradient (precomputed reference values).
    grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
                            [2.37012478e-05, 6.29044398e-05]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
    # Four points tests the tree with multiple levels of children.
    #
    # These tests & answers have been checked against the reference
    # implementation by LvdM.
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    # Expected Barnes-Hut gradient (precomputed reference values).
    grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
                            [-5.81526851e-05, 7.80976444e-06],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
    # Test the kwargs option skip_num_points.
    #
    # Skip num points should make it such that the Barnes_hut gradient
    # is not calculated for indices below skip_num_point.
    # Aside from skip_num_points=2 and the first two gradient rows
    # being set to zero, these data points are the same as in
    # test_answer_gradient_four_points()
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    grad_output = np.array([[0.0, 0.0],
                            [0.0, 0.0],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    # Positional arguments: verbose=False, perplexity=0.1,
    # skip_num_points=2.
    _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     verbose=False, perplexity=0.1, skip_num_points=0):
    """Compare the Barnes-Hut gradient against a precomputed reference.

    Parameters
    ----------
    pos_input : ndarray
        Input points from which the joint probabilities are computed.
    pos_output : ndarray
        Candidate embedding at which the gradient is evaluated.
    neighbors : ndarray
        Ignored in practice: the neighbor indices are recomputed below
        from the sparse probability matrix.
    grad_output : ndarray
        Expected gradient values.
    verbose : bool, default=False
        Verbosity flag forwarded to ``_joint_probabilities``.
    perplexity : float, default=0.1
        Perplexity used when computing the joint probabilities.
    skip_num_points : int, default=0
        Number of leading points for which the gradient must not be
        computed.
    """
    distances = pairwise_distances(pos_input).astype(np.float32)
    args = distances, perplexity, verbose
    pos_output = pos_output.astype(np.float32)
    neighbors = neighbors.astype(np.int64, copy=False)
    pij_input = _joint_probabilities(*args)
    pij_input = squareform(pij_input).astype(np.float32)
    grad_bh = np.zeros(pos_output.shape, dtype=np.float32)

    from scipy.sparse import csr_matrix
    P = csr_matrix(pij_input)

    neighbors = P.indices.astype(np.int64)
    indptr = P.indptr.astype(np.int64)

    # BUG FIX: forward the ``skip_num_points`` argument instead of
    # hardcoding 0, so callers that request skipping (e.g.
    # test_skip_num_points_gradient with skip_num_points=2) actually
    # exercise the skip behavior.
    _barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
    assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
    # Verbose options write to stdout.
    random_state = check_random_state(0)
    tsne = TSNE(verbose=2)
    X = random_state.randn(5, 2)

    # Capture stdout during the fit and restore it afterwards, even if
    # fit_transform raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The progress report must mention each optimization phase.
    assert("[t-SNE]" in out)
    assert("nearest neighbors..." in out)
    assert("Computed conditional probabilities" in out)
    assert("Mean sigma" in out)
    assert("early exaggeration" in out)
def test_chebyshev_metric():
    # Metrics that cannot be squared, such as chebyshev, must be
    # accepted (regression test for issue #3526).
    rng = check_random_state(0)
    data = rng.randn(5, 2)
    TSNE(metric="chebyshev", square_distances=True).fit_transform(data)
def test_reduction_to_one_component():
    # Embedding into a single component must work (issue #4154).
    rng = check_random_state(0)
    data = rng.randn(5, 2)
    embedding = TSNE(n_components=1).fit(data).embedding_
    assert np.all(np.isfinite(embedding))
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
@pytest.mark.parametrize('dt', [np.float32, np.float64])
def test_64bit(method, dt):
    # Ensure 64bit arrays are handled correctly.
    random_state = check_random_state(0)
    X = random_state.randn(10, 2).astype(dt, copy=False)
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                random_state=0, method=method, verbose=0,
                n_iter=300)
    X_embedded = tsne.fit_transform(X)
    effective_type = X_embedded.dtype

    # tsne cython code is only single precision, so the output will
    # always be single precision, irrespectively of the input dtype
    assert effective_type == np.float32
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_kl_divergence_not_nan(method):
    # kl_divergence_ must be computed at the final iteration even when
    # n_iter is not a multiple of n_iter_check (503 % 50 != 0).
    rng = check_random_state(0)
    data = rng.randn(50, 2)
    model = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                 random_state=0, method=method, verbose=0, n_iter=503)
    model.fit_transform(data)
    assert not np.isnan(model.kl_divergence_)
def test_barnes_hut_angle():
    # When Barnes-Hut's angle=0 this corresponds to the exact method.
    angle = 0.0
    perplexity = 10
    n_samples = 100
    for n_components in [2, 3]:
        n_features = 5
        degrees_of_freedom = float(n_components - 1.0)

        random_state = check_random_state(0)
        data = random_state.randn(n_samples, n_features)
        distances = pairwise_distances(data)
        params = random_state.randn(n_samples, n_components)
        # Exact KL divergence from the dense probability matrix.
        P = _joint_probabilities(distances, perplexity, verbose=0)
        kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
                                              n_samples, n_components)

        # Barnes-Hut counterpart, computed from a kNN graph that uses
        # all other samples as neighbors.
        n_neighbors = n_samples - 1
        distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
            n_neighbors=n_neighbors, mode='distance')
        P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
        kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
                                           n_samples, n_components,
                                           angle=angle, skip_num_points=0,
                                           verbose=0)

        P = squareform(P)
        P_bh = P_bh.toarray()
        # With angle=0 the probability matrices and KL values must agree.
        assert_array_almost_equal(P_bh, P, decimal=5)
        assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress():
    # Use a dummy negative n_iter_without_progress and check output on stdout
    random_state = check_random_state(0)
    X = random_state.randn(100, 10)
    for method in ["barnes_hut", "exact"]:
        tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
                    random_state=0, method=method, n_iter=351, init="random")
        # Check progress at every iteration and disable the exploration
        # (early exaggeration) phase so the stop condition fires early.
        tsne._N_ITER_CHECK = 1
        tsne._EXPLORATION_N_ITER = 0

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            tsne.fit_transform(X)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = old_stdout

        # The output needs to contain the value of n_iter_without_progress
        assert ("did not make any progress during the "
                "last -1 episodes. Finished." in out)
def test_min_grad_norm():
    # Make sure that the parameter min_grad_norm is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    min_grad_norm = 0.002
    tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
                random_state=0, method='exact')

    # Capture the verbose optimizer report on stdout.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    lines_out = out.split('\n')

    # extract the gradient norm from the verbose output
    gradient_norm_values = []
    for line in lines_out:
        # When the computation is Finished just an old gradient norm value
        # is repeated that we do not need to store
        if 'Finished' in line:
            break

        start_grad_norm = line.find('gradient norm')
        if start_grad_norm >= 0:
            line = line[start_grad_norm:]
            line = line.replace('gradient norm = ', '').split(' ')[0]
            gradient_norm_values.append(float(line))

    # Compute how often the gradient norm is smaller than min_grad_norm
    gradient_norm_values = np.array(gradient_norm_values)
    n_smaller_gradient_norms = \
        len(gradient_norm_values[gradient_norm_values <= min_grad_norm])

    # The gradient norm can be smaller than min_grad_norm at most once,
    # because in the moment it becomes smaller the optimization stops
    assert n_smaller_gradient_norms <= 1
def test_accessible_kl_divergence():
    # Ensures that the accessible kl_divergence matches the computed value
    random_state = check_random_state(0)
    X = random_state.randn(50, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact',
                n_iter=500)

    # Capture the verbose optimizer report on stdout.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the accessible kl_divergence as the error at
    # the last iteration
    for line in out.split('\n')[::-1]:
        if 'Iteration' in line:
            _, _, error = line.partition('error = ')
            if error:
                error, _, _ = error.partition(',')
                break
    assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_uniform_grid(method):
    """Make sure that TSNE can approximately recover a uniform 2D grid

    Due to ties in distances between point in X_2d_grid, this test is platform
    dependent for ``method='barnes_hut'`` due to numerical imprecision.

    Also, t-SNE is not assured to converge to the right solution because bad
    initialization can lead to convergence to bad local minimum (the
    optimization problem is non-convex). To avoid breaking the test too often,
    we re-run t-SNE from the final point when the convergence is not good
    enough.
    """
    seeds = range(3)
    n_iter = 500
    for seed in seeds:
        tsne = TSNE(n_components=2, init='random', random_state=seed,
                    perplexity=50, n_iter=n_iter, method=method)
        Y = tsne.fit_transform(X_2d_grid)

        try_name = "{}_{}".format(method, seed)
        try:
            assert_uniform_grid(Y, try_name)
        except AssertionError:
            # If the test fails a first time, re-run with init=Y to see if
            # this was caused by a bad initialization. Note that this will
            # also run an early_exaggeration step.
            try_name += ":rerun"
            tsne.init = Y
            Y = tsne.fit_transform(X_2d_grid)
            assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
    """Check that embedding ``Y`` forms an approximately uniform grid.

    The distance from each point to its nearest neighbor must be
    non-zero and roughly constant across the embedding.
    """
    knn = NearestNeighbors(n_neighbors=1).fit(Y)
    nearest_dist = knn.kneighbors(return_distance=True)[0].ravel()
    assert nearest_dist.min() > 0.1

    mean_dist = np.mean(nearest_dist)
    assert nearest_dist.min() / mean_dist > .5, try_name
    assert nearest_dist.max() / mean_dist < 2, try_name
def test_bh_match_exact():
    # check that the ``barnes_hut`` method match the exact one when
    # ``angle = 0`` and ``perplexity > n_samples / 3``
    random_state = check_random_state(0)
    n_features = 10
    X = random_state.randn(30, n_features).astype(np.float32)
    X_embeddeds = {}
    n_iter = {}
    for method in ['exact', 'barnes_hut']:
        tsne = TSNE(n_components=2, method=method, learning_rate=1.0,
                    init="random", random_state=0, n_iter=251,
                    perplexity=30.0, angle=0)
        # Kill the early_exaggeration
        tsne._EXPLORATION_N_ITER = 0
        X_embeddeds[method] = tsne.fit_transform(X)
        n_iter[method] = tsne.n_iter_

    # Both methods must run the same number of iterations and produce
    # (nearly) identical embeddings.
    assert n_iter['exact'] == n_iter['barnes_hut']
    assert_allclose(X_embeddeds['exact'], X_embeddeds['barnes_hut'], rtol=1e-4)
def test_gradient_bh_multithread_match_sequential():
    # The Barnes-Hut KL divergence and gradient must be identical
    # whatever the number of threads used.
    n_features = 10
    n_samples = 30
    n_components = 2
    degrees_of_freedom = 1

    angle = 3
    perplexity = 5

    random_state = check_random_state(0)
    data = random_state.randn(n_samples, n_features).astype(np.float32)
    params = random_state.randn(n_samples, n_components)

    n_neighbors = n_samples - 1
    distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
        n_neighbors=n_neighbors, mode='distance')
    P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
    # Single-threaded reference values.
    kl_sequential, grad_sequential = _kl_divergence_bh(
        params, P_bh, degrees_of_freedom, n_samples, n_components,
        angle=angle, skip_num_points=0, verbose=0, num_threads=1)

    for num_threads in [2, 4]:
        kl_multithread, grad_multithread = _kl_divergence_bh(
            params, P_bh, degrees_of_freedom, n_samples, n_components,
            angle=angle, skip_num_points=0, verbose=0,
            num_threads=num_threads)

        assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)
        # BUG FIX: the gradient was previously compared against itself
        # (grad_multithread vs grad_multithread), making the check a
        # no-op. Compare against the sequential reference instead.
        assert_allclose(grad_multithread, grad_sequential)
def test_tsne_with_different_distance_metrics():
    """Make sure that TSNE works for different distance metrics"""
    random_state = check_random_state(0)
    n_components_original = 3
    n_components_embedding = 2
    X = random_state.randn(50, n_components_original).astype(np.float32)
    metrics = ['manhattan', 'cosine']
    dist_funcs = [manhattan_distances, cosine_distances]
    for metric, dist_func in zip(metrics, dist_funcs):
        # Embedding computed from raw data with the built-in metric ...
        X_transformed_tsne = TSNE(
            metric=metric, n_components=n_components_embedding,
            random_state=0, n_iter=300, square_distances=True).fit_transform(X)
        # ... must equal the embedding of the precomputed distance matrix.
        X_transformed_tsne_precomputed = TSNE(
            metric='precomputed', n_components=n_components_embedding,
            random_state=0, n_iter=300,
            square_distances=True).fit_transform(dist_func(X))
        assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
@pytest.mark.parametrize('metric', ['euclidean', 'manhattan'])
@pytest.mark.parametrize('square_distances', [True, 'legacy'])
@ignore_warnings(category=FutureWarning)
def test_tsne_different_square_distances(method, metric, square_distances):
    # Make sure that TSNE works for different square_distances settings
    # FIXME remove test when square_distances=True becomes the default in 1.1
    random_state = check_random_state(0)
    n_components_original = 3
    n_components_embedding = 2

    # Used to create data with structure; this avoids unstable behavior in TSNE
    X, _ = make_blobs(n_features=n_components_original,
                      random_state=random_state)
    X_precomputed = pairwise_distances(X, metric=metric)

    # In legacy mode Euclidean distances are implicitly squared, so the
    # precomputed matrix must be squared to match.
    if metric == 'euclidean' and square_distances == 'legacy':
        X_precomputed **= 2

    X_transformed_tsne = TSNE(
        metric=metric, n_components=n_components_embedding,
        square_distances=square_distances, method=method,
        random_state=0).fit_transform(X)
    X_transformed_tsne_precomputed = TSNE(
        metric='precomputed', n_components=n_components_embedding,
        square_distances=square_distances, method=method,
        random_state=0).fit_transform(X_precomputed)

    assert_allclose(X_transformed_tsne, X_transformed_tsne_precomputed)
@pytest.mark.parametrize('metric', ['euclidean', 'manhattan'])
@pytest.mark.parametrize('square_distances', [True, 'legacy'])
def test_tsne_square_distances_futurewarning(metric, square_distances):
    # Make sure that a FutureWarning is only raised when a non-Euclidean
    # metric is specified and square_distances is not set to True.
    random_state = check_random_state(0)
    X = random_state.randn(5, 2)
    tsne = TSNE(metric=metric, square_distances=square_distances)

    if metric != 'euclidean' and square_distances is not True:
        with pytest.warns(FutureWarning, match="'square_distances'.*"):
            tsne.fit_transform(X)
    else:
        # No warning is expected in any other configuration.
        with pytest.warns(None) as record:
            tsne.fit_transform(X)
        assert not record
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_tsne_n_jobs(method):
    """Make sure that the n_jobs parameter doesn't impact the output"""
    rng = check_random_state(0)
    data = rng.randn(30, 10)
    embeddings = [
        TSNE(n_components=2, method=method, perplexity=30.0,
             angle=0, n_jobs=n_jobs, random_state=0).fit_transform(data)
        for n_jobs in (1, 2)
    ]
    assert_allclose(embeddings[0], embeddings[1])
| bsd-3-clause |
quheng/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns
    -------
    y_true : ndarray
        True labels of the held-out half of the data.
    y_pred : ndarray
        Predicted labels for the held-out half.
    probas_pred : ndarray
        Predicted class probabilities; only the positive-class column
        when ``binary`` is True.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()

    X = dataset.data
    y = dataset.target

    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]

    n_samples, n_features = X.shape
    p = np.arange(n_samples)

    # Shuffle samples with a fixed seed for reproducibility.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)

    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]

    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])

    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]

    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
    """Alternative implementation to check for correctness of
    `roc_auc_score`.

    Counts the fraction of (positive, negative) pairs in which the
    positive sample is scored strictly higher than the negative one.
    """
    pos_label = np.unique(y_true)[1]

    pos_scores = y_score[y_true == pos_label]
    neg_scores = y_score[y_true != pos_label]

    # Pairwise comparison of every positive score with every negative
    # score; ties do not count as correctly ranked.
    n_correct = 0
    for score in pos_scores:
        n_correct += np.sum(score > neg_scores)
    return n_correct / float(len(pos_scores) * len(neg_scores))
def _average_precision(y_true, y_score):
    """Alternative implementation to check for correctness of
    `average_precision_score`.

    For every relevant document (in decreasing-score order) compute the
    precision of the ranking truncated at that document, then average
    over all relevant documents.
    """
    pos_label = np.unique(y_true)[1]
    n_pos = np.sum(y_true == pos_label)

    # Labels sorted by decreasing score.
    desc_order = np.argsort(y_score)[::-1]
    ranked_labels = y_true[desc_order]

    score = 0
    for i, label in enumerate(ranked_labels):
        if label != pos_label:
            continue
        # Precision at cut-off i + 1: fraction of relevant documents
        # among the top i + 1.
        n_relevant = 0
        for j in range(i + 1):
            if ranked_labels[j] == pos_label:
                n_relevant += 1
        score += n_relevant / (i + 1.0)
    return score / n_pos
def test_roc_curve():
    # Test Area under Receiver Operating Characteristic (ROC) curve
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    # The trapezoidal AUC must agree with the pairwise-ranking reference
    # implementation (_auc) and with roc_auc_score.
    expected_auc = _auc(y_true, probas_pred)
    assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
    assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # roc_curve must return a curve starting at 0 and ending at 1,
    # even in corner cases.
    rng = np.random.RandomState(0)
    labels = np.array([0] * 50 + [1] * 50)
    scores = rng.randint(3, size=100)
    fpr, tpr, thr = roc_curve(labels, scores)
    assert_equal(fpr[0], 0)
    assert_equal(fpr[-1], 1)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
    # Test whether the returned threshold matches up with tpr
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to determine the tpr
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
    # Test to ensure that we don't return spurious repeating thresholds.
    # Duplicated thresholds can arise due to machine precision issues.
    dataset = datasets.load_digits()
    X = dataset['data']
    y = dataset['target']

    # This random forest classifier can only return probabilities
    # significant to two decimal places
    clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)

    # How well can the classifier predict whether a digit is less than 5?
    # This task contributes floating point roundoff errors to the probabilities
    train, test = slice(None, None, 2), slice(1, None, 2)
    probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
    y_score = probas_pred[:, :5].sum(axis=1)  # roundoff errors begin here
    y_true = [yy < 5 for yy in y[test]]

    # Check for repeating values in the thresholds
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
    # roc_curve rejects multi-class targets with a ValueError.
    labels, _, scores = make_prediction(binary=False)
    assert_raises(ValueError, roc_curve, labels, scores)
def test_roc_curve_confidence():
    # roc_curve accepts (possibly negative) confidence scores, not only
    # probabilities.
    labels, _, scores = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(labels, scores - 0.5)
    assert_array_almost_equal(auc(fpr, tpr), 0.90, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
    # roc_curve for hard (non-probabilistic) decisions.
    y_true, pred, probas_pred = make_prediction(binary=True)

    cases = [
        (np.ones(y_true.shape), 0.50),    # always predict one
        (np.zeros(y_true.shape), 0.50),   # always predict zero
        (pred, 0.78),                     # hard class decisions
    ]
    for decision, expected_auc in cases:
        fpr, tpr, thresholds = roc_curve(y_true, decision)
        assert_array_almost_equal(auc(fpr, tpr), expected_auc, decimal=2)
        assert_equal(fpr.shape, tpr.shape)
        assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
    # roc_curve with a single class in y_true must warn and yield NaN
    # for the undefined rate.
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
    # Exhaustive checks of roc_curve / roc_auc_score on tiny inputs.
    #
    # NOTE(review): elsewhere in this file the return values of
    # roc_curve are unpacked as (fpr, tpr, thresholds); here the first
    # two names are swapped. The expected arrays below are consistent
    # with the swapped names, so the assertions still hold — but
    # consider renaming for clarity.

    # Binary classification
    y_true = [0, 1]
    y_score = [0, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [0, 1]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1, 1])
    assert_array_almost_equal(fpr, [0, 0, 1])
    assert_almost_equal(roc_auc, 0.)

    y_true = [1, 0]
    y_score = [1, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    y_true = [1, 0]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [1, 0]
    y_score = [0.5, 0.5]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, .5)

    # Degenerate cases: only one class present in y_true, so
    # roc_auc_score is undefined and must raise.
    y_true = [0, 0]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [0., 0.5, 1.])
    assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])

    y_true = [1, 1]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [np.nan, np.nan])
    assert_array_almost_equal(fpr, [0.5, 1.])

    # Multi-label classification task
    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [0, 1]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)

    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
    # Test Area Under Curve (AUC) computation on small fixtures.
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([1, 0, 0], [0, 1, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected in cases:
        assert_array_almost_equal(auc(x, y), expected)
def test_auc_duplicate_values():
    # Test Area Under Curve (AUC) computation with duplicate values
    # auc() was previously sorting the x and y arrays according to the indices
    # from numpy.argsort(x), which was reordering the tied 0's in this example
    # and resulting in an incorrect area computation. This test detects the
    # error.
    x = [-2.0, 0.0, 0.0, 0.0, 1.0]
    y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
    y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
    y3 = [2.0, 1.0, 0.5, 0.0, 1.0]

    # All y orderings over the tied x values enclose the same area.
    for y in (y1, y2, y3):
        assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
    # Invalid inputs to auc must raise ValueError.
    bad_inputs = [
        ([0.0, 0.5, 1.0], [0.1, 0.2]),        # incompatible shapes
        ([0.0], [0.1]),                       # too few x values
        ([1.0, 0.0, 0.5], [0.0, 0.0, 0.0]),   # x is not in order
    ]
    for x, y in bad_inputs:
        assert_raises(ValueError, auc, x, y)
def test_auc_score_non_binary_class():
    # Test that roc_auc_score function returns an error when trying
    # to compute AUC for non-binary class values.
    def check_non_binary_errors():
        # Shared assertions, executed both with and without warning
        # capture below (the original duplicated this body verbatim).
        rng = check_random_state(404)
        y_pred = rng.rand(10)
        # y_true contains only one class value
        for y_true in (np.zeros(10, dtype="int"),
                       np.ones(10, dtype="int"),
                       -np.ones(10, dtype="int")):
            assert_raise_message(ValueError, "ROC AUC score is not defined",
                                 roc_auc_score, y_true, y_pred)
        # y_true contains three different class values
        y_true = rng.randint(0, 3, size=10)
        assert_raise_message(ValueError, "multiclass format is not supported",
                             roc_auc_score, y_true, y_pred)

    check_non_binary_errors()

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        check_non_binary_errors()
def test_precision_recall_curve():
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()
    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    # Hand-checked toy example.
    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
    assert_equal(p.size, r.size)
    assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
    y_true, _, probas_pred = make_prediction(binary=False)
    pos_label = 2

    # Passing pos_label must be equivalent to binarizing y_true first.
    p, r, thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.85, 2)
    # The PR-AUC must agree with average_precision_score and with the
    # reference implementation (_average_precision).
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    assert_almost_equal(_average_precision(y_true, probas_pred),
                        precision_recall_auc, 1)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)

    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.75, 3)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
    # Non-binary labels must be rejected.
    y_true = [0, 1, 2]
    probas = [[0.0], [1.0], [1.0]]
    assert_raises(ValueError, precision_recall_curve, y_true, probas)
def test_precision_recall_curve_toydata():
    with np.errstate(all="raise"):
        # Binary classification.
        # Each case: (y_true, y_score, expected p, expected r, expected AP).
        binary_cases = [
            ([0, 1], [0, 1], [1, 1], [1, 0], 1.),
            ([0, 1], [1, 0], [0.5, 0., 1.], [1., 0., 0.], 0.25),
            ([1, 0], [1, 1], [0.5, 1], [1., 0], .75),
            ([1, 0], [1, 0], [1, 1], [1, 0], 1.),
            ([1, 0], [0.5, 0.5], [0.5, 1], [1, 0.], .75),
        ]
        for y_true, y_score, exp_p, exp_r, exp_auc in binary_cases:
            p, r, _ = precision_recall_curve(y_true, y_score)
            auc_prc = average_precision_score(y_true, y_score)
            assert_array_almost_equal(p, exp_p)
            assert_array_almost_equal(r, exp_r)
            assert_almost_equal(auc_prc, exp_auc)

        # Degenerate case: no positive label at all.
        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)

        # Only positive labels.
        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])

        # Multi-label classification task.
        # A column with a single class makes macro/weighted undefined.
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 1.)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 1.)

        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="samples"), 0.625)
        assert_almost_equal(average_precision_score(y_true, y_score,
                                                    average="micro"), 0.625)

        # Cases where all four averages are defined and agree.
        multilabel_cases = [
            (np.array([[1, 0], [0, 1]]), np.array([[0, 1], [1, 0]]), 0.25),
            (np.array([[1, 0], [0, 1]]),
             np.array([[0.5, 0.5], [0.5, 0.5]]), 0.75),
        ]
        for y_true, y_score, expected in multilabel_cases:
            for average in ("macro", "weighted", "samples", "micro"):
                assert_almost_equal(
                    average_precision_score(y_true, y_score, average=average),
                    expected)
def test_score_scale_invariance():
    # average_precision_score and roc_auc_score must be invariant under
    # scaling or shifting of the predicted scores.
    y_true, _, probas_pred = make_prediction(binary=True)

    for scorer in (roc_auc_score, average_precision_score):
        reference = scorer(y_true, probas_pred)
        assert_equal(reference, scorer(y_true, 100 * probas_pred))
        assert_equal(reference, scorer(y_true, probas_pred - 10))
def check_lrap_toy(lrap_score):
    # Check on several small examples that the scorer works.
    # Each case: (y_true, y_score, expected score).
    toy_data = [
        ([[0, 1]], [[0.25, 0.75]], 1),
        ([[0, 1]], [[0.75, 0.25]], 1 / 2),
        ([[1, 1]], [[0.75, 0.25]], 1),

        ([[0, 0, 1]], [[0.25, 0.5, 0.75]], 1),
        ([[0, 1, 0]], [[0.25, 0.5, 0.75]], 1 / 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.75]], 1),
        ([[1, 0, 0]], [[0.25, 0.5, 0.75]], 1 / 3),
        ([[1, 0, 1]], [[0.25, 0.5, 0.75]], (2 / 3 + 1 / 1) / 2),
        ([[1, 1, 0]], [[0.25, 0.5, 0.75]], (2 / 3 + 1 / 2) / 2),

        ([[0, 0, 1]], [[0.75, 0.5, 0.25]], 1 / 3),
        ([[0, 1, 0]], [[0.75, 0.5, 0.25]], 1 / 2),
        ([[0, 1, 1]], [[0.75, 0.5, 0.25]], (1 / 2 + 2 / 3) / 2),
        ([[1, 0, 0]], [[0.75, 0.5, 0.25]], 1),
        ([[1, 0, 1]], [[0.75, 0.5, 0.25]], (1 + 2 / 3) / 2),
        ([[1, 1, 0]], [[0.75, 0.5, 0.25]], 1),
        ([[1, 1, 1]], [[0.75, 0.5, 0.25]], 1),

        ([[0, 0, 1]], [[0.5, 0.75, 0.25]], 1 / 3),
        ([[0, 1, 0]], [[0.5, 0.75, 0.25]], 1),
        ([[0, 1, 1]], [[0.5, 0.75, 0.25]], (1 + 2 / 3) / 2),
        ([[1, 0, 0]], [[0.5, 0.75, 0.25]], 1 / 2),
        ([[1, 0, 1]], [[0.5, 0.75, 0.25]], (1 / 2 + 2 / 3) / 2),
        ([[1, 1, 0]], [[0.5, 0.75, 0.25]], 1),
        ([[1, 1, 1]], [[0.5, 0.75, 0.25]], 1),

        # Tie handling
        ([[1, 0]], [[0.5, 0.5]], 0.5),
        ([[0, 1]], [[0.5, 0.5]], 0.5),
        ([[1, 1]], [[0.5, 0.5]], 1),

        ([[0, 0, 1]], [[0.25, 0.5, 0.5]], 0.5),
        ([[0, 1, 0]], [[0.25, 0.5, 0.5]], 0.5),
        ([[0, 1, 1]], [[0.25, 0.5, 0.5]], 1),
        ([[1, 0, 0]], [[0.25, 0.5, 0.5]], 1 / 3),
        ([[1, 0, 1]], [[0.25, 0.5, 0.5]], (2 / 3 + 1 / 2) / 2),
        ([[1, 1, 0]], [[0.25, 0.5, 0.5]], (2 / 3 + 1 / 2) / 2),
        ([[1, 1, 1]], [[0.25, 0.5, 0.5]], 1),

        ([[1, 1, 0]], [[0.5, 0.5, 0.5]], 2 / 3),
        ([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]], 3 / 4),
    ]
    for y_true, y_score, expected in toy_data:
        assert_almost_equal(lrap_score(y_true, y_score), expected)
def check_zero_or_all_relevant_labels(lrap_score):
    # When no label (or every label) is relevant, the score is 1 by
    # convention, with or without ties in the scores.
    rng = check_random_state(0)

    for n_labels in range(2, 5):
        y_score = rng.uniform(size=(1, n_labels))
        y_score_ties = np.zeros_like(y_score)

        for y_true in (np.zeros((1, n_labels)), np.ones((1, n_labels))):
            assert_equal(lrap_score(y_true, y_score), 1.)
            assert_equal(lrap_score(y_true, y_score_ties), 1.)

    # Degenerate case: only one label.
    assert_almost_equal(lrap_score([[1], [0], [1], [0]],
                                   [[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise ValueError when the input format is not appropriate.
    bad_inputs = [
        # 1d or multiclass y_true is not a valid multilabel indicator.
        ([0, 1, 0], [0.25, 0.3, 0.2]),
        ([0, 1, 2], [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]),
        ([(0), (1), (2)],
         [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]),
        # y_true.shape != y_score.shape must raise the proper exception.
        ([[0, 1], [0, 1]], [0, 1]),
        ([[0, 1], [0, 1]], [[0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
        ([[0, 1]], [[0, 1], [0, 1]]),
        ([[0], [1]], [[0, 1], [0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
    ]
    for y_true, y_score in bad_inputs:
        assert_raises(ValueError, lrap_score, y_true, y_score)
def check_lrap_only_ties(lrap_score):
    # When every score is tied, the expected score is the fraction of
    # relevant labels: n_relevant / n_labels.
    for n_labels in range(2, 10):
        y_score = np.ones((1, n_labels))
        # Growing number of consecutive relevant labels ...
        for n_relevant in range(1, n_labels):
            # ... placed at various positions.
            for start in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, start:start + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check label ranking average precision for strictly decreasing
    # scores over a growing label space (no ties anywhere).
    for n_labels in range(2, 10):
        # Scores n_labels-1, n_labels-2, ..., 0: label j has rank j+1.
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)

        # First and last labels relevant.
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)

        # Growing number of consecutive relevant labels ...
        for n_relevant in range(1, n_labels):
            # ... placed at various positions.
            for start in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, start:start + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((start + r + 1) * n_relevant)
                                        for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
    """Naive reference implementation of label ranking average precision.

    For each sample, every relevant label is scored by the fraction of
    relevant labels among all labels ranked at or above it; the sample
    score is the mean over its relevant labels, and the return value is
    the mean over samples.
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank corresponds to 1; ranks greater than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank
        # Ranks need to be corrected for ties: e.g. two labels tied at
        # rank 1 ("ex aequo") must both count as rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]
        relevant = y_true[i].nonzero()[0]
        # By convention, score is 1 when no label (or every label) is
        # relevant.
        if relevant.size == 0 or relevant.size == n_labels:
            score[i] = 1
            continue
        score[i] = 0.
        for label in relevant:
            # Count the relevant labels with a better (smaller or equal)
            # rank.
            n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
            # Weight by the rank of the current label.
            score[i] += n_ranked_above / rank[label]
        score[i] /= relevant.size
    return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    """Check *lrap_score* against the naive reference implementation.

    BUG FIX: the original body ignored the ``lrap_score`` argument and
    always called ``label_ranking_average_precision_score`` directly;
    the parameter is now honored (the only existing caller passes that
    same function, so current behavior is unchanged).
    """
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)

    # Score with ties
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)

    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = lrap_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)

    # Uniform score
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = lrap_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
    # Run every generic check against both the optimized implementation
    # and the naive reference one.
    for fn in [label_ranking_average_precision_score, _my_lrap]:
        for check in (check_lrap_toy,
                      check_lrap_without_tie_and_increasing_score,
                      check_lrap_only_ties,
                      check_zero_or_all_relevant_labels):
            yield check, fn
    yield check_lrap_error_raised, label_ranking_average_precision_score

    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state)
def test_coverage_error():
    # Toy cases: (y_true, y_score, expected coverage).
    toy_data = [
        ([[0, 1]], [[0.25, 0.75]], 1),
        ([[0, 1]], [[0.75, 0.25]], 2),
        ([[1, 1]], [[0.75, 0.25]], 2),
        ([[0, 0]], [[0.75, 0.25]], 0),

        ([[0, 0, 0]], [[0.25, 0.5, 0.75]], 0),
        ([[0, 0, 1]], [[0.25, 0.5, 0.75]], 1),
        ([[0, 1, 0]], [[0.25, 0.5, 0.75]], 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.75]], 2),
        ([[1, 0, 0]], [[0.25, 0.5, 0.75]], 3),
        ([[1, 0, 1]], [[0.25, 0.5, 0.75]], 3),
        ([[1, 1, 0]], [[0.25, 0.5, 0.75]], 3),
        ([[1, 1, 1]], [[0.25, 0.5, 0.75]], 3),

        ([[0, 0, 0]], [[0.75, 0.5, 0.25]], 0),
        ([[0, 0, 1]], [[0.75, 0.5, 0.25]], 3),
        ([[0, 1, 0]], [[0.75, 0.5, 0.25]], 2),
        ([[0, 1, 1]], [[0.75, 0.5, 0.25]], 3),
        ([[1, 0, 0]], [[0.75, 0.5, 0.25]], 1),
        ([[1, 0, 1]], [[0.75, 0.5, 0.25]], 3),
        ([[1, 1, 0]], [[0.75, 0.5, 0.25]], 2),
        ([[1, 1, 1]], [[0.75, 0.5, 0.25]], 3),

        ([[0, 0, 0]], [[0.5, 0.75, 0.25]], 0),
        ([[0, 0, 1]], [[0.5, 0.75, 0.25]], 3),
        ([[0, 1, 0]], [[0.5, 0.75, 0.25]], 1),
        ([[0, 1, 1]], [[0.5, 0.75, 0.25]], 3),
        ([[1, 0, 0]], [[0.5, 0.75, 0.25]], 2),
        ([[1, 0, 1]], [[0.5, 0.75, 0.25]], 3),
        ([[1, 1, 0]], [[0.5, 0.75, 0.25]], 2),
        ([[1, 1, 1]], [[0.5, 0.75, 0.25]], 3),
    ]
    for y_true, y_score, expected in toy_data:
        assert_almost_equal(coverage_error(y_true, y_score), expected)

    # Non-trivial, multi-sample cases.
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
                                       [[0.1, 10., -3], [0, 1, 3]]),
                        (1 + 3) / 2.)
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
    # Tie handling: (y_true, y_score, expected coverage).
    tie_data = [
        ([[0, 0]], [[0.5, 0.5]], 0),
        ([[1, 0]], [[0.5, 0.5]], 2),
        ([[0, 1]], [[0.5, 0.5]], 2),
        ([[1, 1]], [[0.5, 0.5]], 2),

        ([[0, 0, 0]], [[0.25, 0.5, 0.5]], 0),
        ([[0, 0, 1]], [[0.25, 0.5, 0.5]], 2),
        ([[0, 1, 0]], [[0.25, 0.5, 0.5]], 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.5]], 2),
        ([[1, 0, 0]], [[0.25, 0.5, 0.5]], 3),
        ([[1, 0, 1]], [[0.25, 0.5, 0.5]], 3),
        ([[1, 1, 0]], [[0.25, 0.5, 0.5]], 3),
        ([[1, 1, 1]], [[0.25, 0.5, 0.5]], 3),
    ]
    for y_true, y_score, expected in tie_data:
        assert_almost_equal(coverage_error(y_true, y_score), expected)
def test_label_ranking_loss():
    # Toy cases: (y_true, y_score, expected loss).
    toy_data = [
        ([[0, 1]], [[0.25, 0.75]], 0),
        ([[0, 1]], [[0.75, 0.25]], 1),

        ([[0, 0, 1]], [[0.25, 0.5, 0.75]], 0),
        ([[0, 1, 0]], [[0.25, 0.5, 0.75]], 1 / 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.75]], 0),
        ([[1, 0, 0]], [[0.25, 0.5, 0.75]], 2 / 2),
        ([[1, 0, 1]], [[0.25, 0.5, 0.75]], 1 / 2),
        ([[1, 1, 0]], [[0.25, 0.5, 0.75]], 2 / 2),

        # Undefined metrics - the ranking doesn't matter.
        ([[0, 0]], [[0.75, 0.25]], 0),
        ([[1, 1]], [[0.75, 0.25]], 0),
        ([[0, 0]], [[0.5, 0.5]], 0),
        ([[1, 1]], [[0.5, 0.5]], 0),

        ([[0, 0, 0]], [[0.5, 0.75, 0.25]], 0),
        ([[1, 1, 1]], [[0.5, 0.75, 0.25]], 0),
        ([[0, 0, 0]], [[0.25, 0.5, 0.5]], 0),
        ([[1, 1, 1]], [[0.25, 0.5, 0.5]], 0),
    ]
    for y_true, y_score, expected in toy_data:
        assert_almost_equal(label_ranking_loss(y_true, y_score), expected)

    # Non-trivial, multi-sample cases.
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    # Sparse csr matrices are accepted for y_true.
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # y_true.shape != y_score.shape must raise the proper exception.
    mismatched = [
        ([[0, 1], [0, 1]], [0, 1]),
        ([[0, 1], [0, 1]], [[0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
        ([[0, 1]], [[0, 1], [0, 1]]),
        ([[0], [1]], [[0, 1], [0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
    ]
    for y_true, y_score in mismatched:
        assert_raises(ValueError, label_ranking_loss, y_true, y_score)
def test_ranking_loss_ties_handling():
    # Tie handling: (y_true, y_score, expected loss).
    tie_data = [
        ([[1, 0]], [[0.5, 0.5]], 1),
        ([[0, 1]], [[0.5, 0.5]], 1),

        ([[0, 0, 1]], [[0.25, 0.5, 0.5]], 1 / 2),
        ([[0, 1, 0]], [[0.25, 0.5, 0.5]], 1 / 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.5]], 0),
        ([[1, 0, 0]], [[0.25, 0.5, 0.5]], 1),
        ([[1, 0, 1]], [[0.25, 0.5, 0.5]], 1),
        ([[1, 1, 0]], [[0.25, 0.5, 0.5]], 1),
    ]
    for y_true, y_score, expected in tie_data:
        assert_almost_equal(label_ranking_loss(y_true, y_score), expected)
| bsd-3-clause |
mvfcopetti/pySSN | pyssn/qt/pyssn_qt.py | 1 | 219113 | """
This is the window manager part of pySSN
pySSN is available under the GNU licence provided you cite the developers' names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
Inspired by a demo code by:
Eli Bendersky (eliben@gmail.com)
"""
import sys, os
from PyQt4 import QtCore, QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# NavigationToolbar2QT is subclassed below to add a custom cursor action.
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from pyssn import log_, __version__
from ..core.spectrum import spectrum
from ..utils.misc import get_parser
from collections import OrderedDict
from ..utils.physics import CST
# Default verbosity level of the pySSN logger.
log_.level = 4
#ToDo :
class NavigationToolbar( NavigationToolbar2QT ):
    """Matplotlib Qt toolbar extended with a custom "Toggle Curs" action.

    The `curs` signal is emitted with the toggle state of the action and,
    while the action is checked, re-emitted on canvas pick events.
    """

    curs = QtCore.pyqtSignal(bool)

    def __init__(self, canvas, parent ):
        NavigationToolbar2QT.__init__(self,canvas,parent)
        self.clearButtons=[]
        # Search through existing buttons;
        # 'next' was intended for placement of the custom button.
        next=None
        for c in self.findChildren(QtGui.QToolButton):
            if next is None:
                next=c
            # Don't want to see subplots and customize
            """
            if str(c.text()) in ('Subplots', 'Customize'):
                c.defaultAction().setVisible(False)
                continue
            """
            # Need to keep track of pan and zoom buttons
            # Also grab toggled event to clear checked status of picker button
            if str(c.text()) in ('Pan','Zoom'):
                c.toggled.connect(self.clearCurs)
                self.clearButtons.append(c)
                next=None
        # Create the custom button icon: a red square crossed by blue bars.
        pm=QtGui.QPixmap(32,32)
        pm.fill(QtGui.QApplication.palette().color(QtGui.QPalette.Normal,QtGui.QPalette.Button))
        painter=QtGui.QPainter(pm)
        painter.fillRect(6,6,20,20,QtCore.Qt.red)
        painter.fillRect(15,3,3,26,QtCore.Qt.blue)
        painter.fillRect(3,15,26,3,QtCore.Qt.blue)
        painter.end()
        icon=QtGui.QIcon(pm)
        ac = self.addAction(icon, "Toggle Curs")
        ac.setCheckable(True)
        # TODO: decide how to initialize the checked state
        # (original comment, Spanish: "Ver como inicializar")
        #ac.setChecked(True)
        ac.toggled.connect(self.curs_toggle)
        self.ac = ac
        #button=QtGui.QToolButton(self)
        #button.setDefaultAction(self.ac)
        # Add it to the toolbar, and connect up event
        #self.insertWidget(next.defaultAction(),button)
        # Grab the picked event from the canvas
        canvas.mpl_connect('pick_event',self.canvasPicked)

    def clearCurs(self, checked):
        # When Pan or Zoom becomes active, uncheck the cursor action.
        if checked:
            self.ac.setChecked(False)

    def curs_toggle(self, checked):
        # Forward the action's toggle state to listeners.
        self.curs.emit(checked)

    def canvasPicked(self, event):
        # Re-emit pick events while the cursor action is checked.
        # NOTE(review): event.ind is an index array although the signal is
        # declared pyqtSignal(bool) -- confirm this is intended.
        if self.ac.isChecked():
            self.curs.emit(event.ind)
class AppForm(QtGui.QMainWindow):
    def __init__(self, parent=None, init_filename=None, post_proc_file=None, use_workspace=False):
        """Main pySSN window.

        Parameters
        ----------
        parent : QWidget or None
            Parent widget passed to QMainWindow.
        init_filename : str or None
            Initialization file name (presumably read by exec_init() --
            confirm).
        post_proc_file : str or None
            File used by the post-processing action (see apply_post_proc).
        use_workspace : bool
            If True, the canvas is placed inside a QWorkspace (MDI) area.
        """
        self.calling = 'pySSN GUI'
        self.use_workspace = use_workspace
        QtGui.QMainWindow.__init__(self, parent)
        self.setWindowTitle('pySSN')
        # Spectrum object and matplotlib handles, created later.
        self.sp = None
        self.axes = None
        self.axes2 = None
        self.axes3 = None
        self.fig = None
        # Initial state for the main plot, read from the init file.
        self.init_file_name = init_filename
        self.init_line_num = None
        self.init_ion = None
        self.init_xmin = None
        self.init_xmax = None
        self.init_y1min = None
        self.init_y1max = None
        self.init_y3min = None
        self.init_y3max = None
        self.init_legend_fontsize = None
        self.init_legend_loc = None
        # Initial state for the "nearby lines" view.
        self.init_nearby_line_num = None
        self.init_nearby_ion = None
        self.init_nearby_xmin = None
        self.init_nearby_xmax = None
        self.init_nearby_y1min = None
        self.init_nearby_y1max = None
        self.init_nearby_y3min = None
        self.init_nearby_y3max = None
        self.init_nearby_legend_fontsize = None
        self.init_nearby_legend_loc = None
        # Initial state for the continuum view.
        self.init_cont_line_num = None
        self.init_cont_ion = None
        self.init_cont_xmin = None
        self.init_cont_xmax = None
        self.init_cont_y1min = None
        self.init_cont_y1max = None
        self.init_cont_y3min = None
        self.init_cont_y3max = None
        self.init_cont_legend_fontsize = None
        self.init_cont_legend_loc = None
        # Plotting/cursor state.
        self.call_on_draw = True
        self.cursor_on = False
        self.line_info_ref = 0
        self.x_plot_lims = None
        self.y1_plot_lims = None
        self.y2_plot_lims = None
        self.y3_plot_lims = None
        self.xscale = None
        self.yscale = None
        # Files and saving behavior.
        self.post_proc_file = post_proc_file
        self.tick_file = None
        self.save_parameters_file = None
        self.do_save = True
        self.cont_par_changed = False
        self.axes_fixed = False
        self.showErrorBox = True
        # Build the UI and run the initialization file.
        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()
        self.exec_init()
        # Child dialogs (lazily created; closeEvent closes non-None ones).
        self.cont_pars_dialog = None
        self.cursor_w1 = None
        self.cursor_w2 = None
        self.nearbyLines = None
        self.nearbyLines_sort_by = 'i_tot'
        self.nearbyLines_sort_reverse = True
        self.nearbyLines_dialog = None
        self.nearbyLines_selected_ions = None
        self.line_info_dialog = None
        self.instr_prof_dialog = None
        self.refine_wave_dialog = None
        self.refine_wave_as_table = False
        self.interpol_cont_dialog = None
        self.interpol_cont_as_table = False
        self.fig_prof = None
        # Legend tick markers state.
        self.green_tick_shown = False
        self.magenta_tick_shown = False
        self.addGreenTickToLegend = True
        self.show_true_ions = False
        self.nearbyDialogFilterIsActive = False
        # Interactive user-continuum point editing state.
        self.get_user_cont_points = False
        self.del_user_cont_points = False
        self.user_cont_editBox = None
        self.showHelpBrowser = False
def closeEvent(self, evnt):
if self.sp.get_conf('save_parameters_on_exit'):
self.save_pars_as()
if self.cont_pars_dialog is not None:
self.cont_pars_dialog.close()
if self.nearbyLines_dialog is not None:
self.nearbyLines_dialog.close()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
if self.instr_prof_dialog is not None:
self.instr_prof_dialog.close()
if self.refine_wave_dialog is not None:
self.refine_wave_dialog.close()
if self.interpol_cont_dialog is not None:
self.interpol_cont_dialog.close()
def image_extension_list(self):
filetypes = self.canvas.get_supported_filetypes()
file_extensions = filetypes.keys()
file_extensions.sort()
return file_extensions
def image_filter(self, fileExt=''):
filetypes = self.canvas.get_supported_filetypes_grouped()
imagetype_list = filetypes.keys()
imagetype_list.sort()
s = ''
k = 0
for imagetype in imagetype_list:
extension_list = filetypes[ imagetype ]
if fileExt in extension_list:
k = imagetype_list.index(imagetype)
s = s + str(imagetype)
s1 = ' (*.' + str(extension_list[0])
for extension in extension_list[1:]:
s1 = s1 + ' *.' + str(extension)
s1 = s1 + ')'
s = s + s1 + s1 + ';;'
filter_str = s[:-2]
selectedFilter = s.split(';;')[k]
return filter_str, selectedFilter
    def save_plot(self):
        # Write the current figure to the configured file name.
        path = self.sp.get_conf('plot_filename')
        self.canvas.print_figure(path, dpi=self.dpi)
        # Transient (2 s) confirmation in the status bar.
        self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
def save_plot_as(self):
path = self.sp.get_conf('plot_filename')
extension = os.path.splitext(path)[1][1:].lower()
file_choices, selectedFilter = self.image_filter(extension)
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save plot to file', path, file_choices, selectedFilter))
if path:
extension = os.path.splitext(path)[1][1:].lower()
if extension in self.image_extension_list():
self.sp.set_conf('plot_filename', path)
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
else:
title = 'Error saving plot'
msg = 'Format "{0}" not supported.'.format(extension)
msg = msg + '\nSupported formats: '
extension_list = self.image_extension_list()
n = len(extension_list)-1
s = ''
for i in range(0,n):
s = s + extension_list[i] + ', '
s = s + extension_list[n] + '.'
msg = msg + s
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
    def on_about(self):
        # Minimal "About" message box for the application.
        msg = """ pySSN (Spectral Synthesis for Nebulae):
        """
        QtGui.QMessageBox.about(self, "About the demo", msg.strip())
    def set_cursor(self, checked):
        # Slot for the toolbar's "Toggle Curs" action.
        self.cursor_on = checked
        # NOTE(review): firstClick appears to re-arm a click sequence in
        # the spectrum object -- confirm against spectrum.nearby_lines.
        self.sp.firstClick = True
    def on_click(self, event):
        """Dispatch a canvas mouse click to the active interaction mode.

        Three modes, checked in order: adding a user continuum point,
        deleting the nearest user continuum point, or (with the cursor
        active) showing the spectral lines near the clicked wavelength.
        """
        if self.get_user_cont_points and self.user_cont_editBox is not None:
            wave = event.xdata
            # Indices of the wavelength interval that brackets the click
            # (both orderings of w[i], w[i+1] are accepted).
            i_list = [i for i in range(len(self.sp.w)-1) if self.sp.w[i] <= wave <= self.sp.w[i+1] or self.sp.w[i+1] <= wave <= self.sp.w[i]]
            if len(i_list) == 1:
                i = i_list[0]
                # Continuum at the click, minus the current user component.
                c = self.sp.cont[i] - self.sp.conts['user'][i]
                self.user_cont_editBox.append('{:<7.1f} {:.2f}'.format(event.xdata, event.ydata-c))
                self.update_user_cont()
        elif ( self.del_user_cont_points and
               self.user_cont_editBox is not None and
               self.sp.get_conf('cont_user_table') is not None ):
            wave = event.xdata
            # Work on a copy of the table; remove the point whose
            # wavelength is closest to the click.
            points = self.sp.get_conf('cont_user_table')[:]
            if points is not None and len(points) > 0:
                points.remove(min(points, key=lambda x:abs(x[0]-wave)))
                self.user_cont_list2table(points)
                self.update_user_cont()
        elif self.cursor_on:
            # Print to the console instead of a dialog when dialogs are
            # disabled in the configuration.
            do_print = not self.sp.get_conf('qt_show_dialogs', True)
            nearbyLines = self.sp.nearby_lines(event, do_print, sort='i_tot', reverse=True)
            if nearbyLines is None:
                return
            self.nearbyLines = nearbyLines
            if not do_print:
                self.show_nearbyLines_dialog()
def sort_nearbyLines(self, sort, reverse=False):
if self.nearbyLines is None:
return
if sort == 'proc':
sorts = np.argsort([ self.sp.process[str(line_num)[-9]] for line_num in self.nearbyLines['num'] ])
else:
sorts = np.argsort(self.nearbyLines[sort])
if reverse:
sorts = sorts[::-1]
self.nearbyLines = np.array(self.nearbyLines)[sorts]
def create_main_frame(self):
if self.use_workspace:
self.main_frame = QtGui.QWorkspace()
else:
self.main_frame = QtGui.QWidget()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 100
#self.fig = plt.figure(figsize=(15,15))
self.fig = plt.figure(figsize=(15,15))
# self.fig = plt.figure(figsize=(20.0, 15.0), dpi=self.dpi)
log_.debug('creating figure {}'.format(id(self.fig)), calling=self.calling)
self.canvas = FigureCanvas(self.fig)
if self.use_workspace:
self.main_frame.addWindow(self.canvas)
self.fig2 = Figure((20.0, 15.0), dpi=self.dpi)
self.canvas2 = FigureCanvas(self.fig2)
#self.main_frame.addWindow(self.canvas2)
else:
self.canvas.setParent(self.main_frame)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('figure_leave_event', self.leave_fig)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.mpl_toolbar.curs.connect(self.set_cursor)
# Other GUI controls
#
self.fix_axes_cb = QtGui.QCheckBox("fix")
self.fix_axes_cb.setChecked(False)
self.connect(self.fix_axes_cb, QtCore.SIGNAL('stateChanged(int)'), self.fix_axes)
self.xlim_min_box = QtGui.QLineEdit()
self.xlim_min_box.setMinimumWidth(50)
#self.connect(self.xlim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_min)
self.connect(self.xlim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.xlim_max_box = QtGui.QLineEdit()
self.xlim_max_box.setMinimumWidth(50)
#self.connect(self.xlim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_max)
#self.xlim_max_box.editingFinished.connect(self.validate_xlim_max)
self.connect(self.xlim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_min_box = QtGui.QLineEdit()
self.y1lim_min_box.setMinimumWidth(50)
#self.connect(self.y1lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_min)
self.connect(self.y1lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_max_box = QtGui.QLineEdit()
self.y1lim_max_box.setMinimumWidth(50)
#self.connect(self.y1lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_max)
self.connect(self.y1lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_min_box = QtGui.QLineEdit()
self.y3lim_min_box.setMinimumWidth(50)
#self.connect(self.y3lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_min)
self.connect(self.y3lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_max_box = QtGui.QLineEdit()
self.y3lim_max_box.setMinimumWidth(50)
#self.connect(self.y3lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_max)
self.connect(self.y3lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.run_button = QtGui.QPushButton("Run")
self.connect(self.run_button, QtCore.SIGNAL('clicked()'), self.rerun)
self.draw_button = QtGui.QPushButton("Draw")
self.connect(self.draw_button, QtCore.SIGNAL('clicked()'), self.on_draw)
self.Command_GroupBox = QtGui.QGroupBox("Execute")
self.Command_GroupBox.setCheckable(False)
self.ObsSpec_GroupBox = QtGui.QGroupBox("Parameters of the synthetic spectrum")
self.ObsSpec_GroupBox.setCheckable(False)
self.SpecPlot_GroupBox = QtGui.QGroupBox("Plot of spectra")
self.SpecPlot_GroupBox.setCheckable(False)
self.lineIDs_GroupBox = QtGui.QGroupBox("Show lines")
self.lineIDs_GroupBox.setCheckable(True)
self.lineIDs_GroupBox.setChecked(True)
self.connect(self.lineIDs_GroupBox, QtCore.SIGNAL('clicked()'), self.show_lines_clicked)
self.lineIDs_GroupBox_ToolTip = 'Check to show ticks at the central positions of the spectral lines and plot the lines of selected ions'
self.residual_GroupBox = QtGui.QGroupBox("Plot of residuals")
self.residual_GroupBox.setCheckable(True)
self.residual_GroupBox.setChecked(True)
self.connect(self.residual_GroupBox, QtCore.SIGNAL('clicked()'), self.residual_box_clicked)
self.residual_GroupBox_ToolTip = 'Check to display the residual plot'
self.adjust_button = QtGui.QPushButton("Update")
self.adjust_button.setChecked(False)
self.connect(self.adjust_button, QtCore.SIGNAL('clicked()'), self.adjust)
self.post_proc_button = QtGui.QPushButton("Post proc")
self.post_proc_button.setChecked(False)
self.connect(self.post_proc_button, QtCore.SIGNAL('clicked()'), self.apply_post_proc)
self.update_profile_button = QtGui.QPushButton("Update profiles")
self.update_profile_button.setChecked(False)
self.connect(self.update_profile_button, QtCore.SIGNAL('clicked()'), self.update_profile)
self.sp_min_box = QtGui.QLineEdit()
self.sp_min_box.setMinimumWidth(50)
#self.connect(self.sp_min_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_min_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_max_box = QtGui.QLineEdit()
self.sp_max_box.setMinimumWidth(50)
#self.connect(self.sp_max_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_max_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_norm_box = QtGui.QLineEdit()
self.sp_norm_box.setMinimumWidth(50)
self.connect(self.sp_norm_box, QtCore.SIGNAL('returnPressed()'), self.sp_norm)
self.obj_velo_box = QtGui.QLineEdit()
self.obj_velo_box.setMinimumWidth(50)
self.connect(self.obj_velo_box, QtCore.SIGNAL('returnPressed()'), self.obj_velo)
self.ebv_box = QtGui.QLineEdit()
self.ebv_box.setMinimumWidth(50)
self.connect(self.ebv_box, QtCore.SIGNAL('returnPressed()'), self.ebv)
self.resol_box = QtGui.QLineEdit()
self.resol_box.setMinimumWidth(50)
self.connect(self.resol_box, QtCore.SIGNAL('returnPressed()'), self.resol)
self.cut2_box = QtGui.QLineEdit()
self.cut2_box.setMinimumWidth(50)
self.connect(self.cut2_box, QtCore.SIGNAL('returnPressed()'), self.cut2)
self.cut_cb = QtGui.QCheckBox('')
self.cut_cb.setChecked(False)
self.connect(self.cut_cb, QtCore.SIGNAL('clicked()'), self.cut_cb_changed)
self.ion_box = QtGui.QLineEdit()
self.ion_box.setMinimumWidth(70)
self.connect(self.ion_box, QtCore.SIGNAL('returnPressed()'), self.draw_ion)
self.ion_cb = QtGui.QCheckBox('')
self.ion_cb.setChecked(False)
self.connect(self.ion_cb, QtCore.SIGNAL('clicked()'), self.ion_cb_changed)
self.line_info_box = QtGui.QLineEdit()
self.line_info_box.setFixedWidth(130)
self.connect(self.line_info_box, QtCore.SIGNAL('returnPressed()'), self.line_info)
self.mpl_toolbar.addSeparator()
self.mpl_toolbar.addWidget(QtGui.QLabel(' line number '))
self.mpl_toolbar.addWidget(self.line_info_box)
self.magenta_box = QtGui.QLineEdit()
self.magenta_box.setMinimumWidth(50)
self.connect(self.magenta_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.magenta_label_box = QtGui.QLineEdit()
self.magenta_label_box.setMinimumWidth(50)
self.connect(self.magenta_label_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.cyan_box = QtGui.QLineEdit()
self.cyan_box.setMinimumWidth(50)
self.connect(self.cyan_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.cyan_label_box = QtGui.QLineEdit()
self.cyan_label_box.setMinimumWidth(50)
self.connect(self.cyan_label_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.setStyleSheet("""QToolTip {
background-color: black;
color: lightgray;
min-width: 20em;
font-size: 14px;
font-family: "sans-serif";
border: black solid 10px
}""")
s = 'Click to execute the synthesis from the beginning.'
self.run_button_ToolTip = s
s = 'Click to update synthesis with changes in line intensities, profiles, and continuum parameters.'
self.adjust_button_ToolTip = s
s = 'Enter line number to get information on\n' \
'the reference line and on its satellites.'
self.line_info_box_ToolTip = s
s = 'Color excess E(B-V)\n\n' \
'Set with: \n' \
' e_bv = <float>\n\n' \
'Comment: \n' \
u' E(B-V) \u2248 C(H\u03B2) / 1.5'
self.ebv_box_ToolTip = s
s = 'Radial velocity in km/s\n\n' \
'Set with: \n' \
' obj_velo = <float>'
self.obj_velo_box_ToolTip = s
s = 'Minimum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_min_box_ToolTip = s
s = 'Maximum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_max_box_ToolTip = s
s = 'Minimum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_min_box_ToolTip = s
s = 'Maximum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of spectra, in units of relative intensity \n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of spectra, in units of relative intensity\n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_max_box_ToolTip = s
s = 'Check to retain the current limits of the plots while zooming and panning.'
self.fix_axes_cb_ToolTip = s
s = 'Check to show only lines with intensities above cut. \n\n' \
'Set with: \n' \
' show_selected_intensities_only = <boolean>'
self.cut_cb_ToolTip = s
s = 'Check to show only lines of selected ions. \n\n' \
'Set with: \n' \
' show_selected_ions_only = <boolean>'
self.ion_cb_ToolTip = s
s = 'Normalization factor, ratio between the intensity and the \n' \
u'observed flux of the reference line, usually 10\u2074/F(H\u03B2)\n\n' \
'Set with: \n' \
' sp_norm = <float>'
self.sp_norm_box_ToolTip = s
s = 'Rebinning factor, the odd integer factor by which the number of points \n' \
'of the original spectrum is multiplied in the rebinning process\n\n' \
'Set with: \n' \
' resol = <integer>\n\n' \
'Usage: \n' \
' Set to \'1\' if the resolution of the observed spectrum is large enough'
self.resol_box_ToolTip = s
s = 'Minimum relative intensity of lines to be shown. \n\n' \
'Set with: \n' \
' cut_plot2 = <float>'
self.cut2_box_ToolTip = s
s = 'Comma-separated list of selected ions, elements, or line numbers to be shown. \n\n' \
'Set with: \n' \
' selected_ions = [<ion1>,<ion2>,...]\n\n' \
'Examples: \n' \
' \'O III\' (or \'O_III\') to show the lines of O III\n' \
' \'O III*\' (or \'O_III*\') to show the lines of O III, O IIIfl, O III5g, etc\n' \
' \'O III, O IV\' to show the lines of O III and O IV\n' \
' \'O\' to show the lines of all O ions\n' \
' \'Fe, N\' to show the lines of all Fe and N ions\n' \
' <line number> to show the lines of that same ion'
self.ion_box_ToolTip = s
#
# Layout with box sizers
#
CommandLayout = QtGui.QGridLayout()
wList = [self.run_button,self.adjust_button]
Nrow = 2
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
CommandLayout.addWidget(w,i,j)
CommandLayout.setAlignment(w,QtCore.Qt.AlignCenter)
self.Command_GroupBox.setLayout(CommandLayout)
ObsSpecLayout = QtGui.QGridLayout()
lList = ['xmin', 'xmax', u'10\u2074/F(H\u03B2)', 'radial vel.', 'E(B-V)', 'N']
wList = [self.sp_min_box, self.sp_max_box, self.sp_norm_box, self.obj_velo_box, self.ebv_box, self.resol_box ]
Nrow = 2
for l in lList:
w = QtGui.QLabel(l)
k = lList.index( l )
i = k%Nrow
j = 2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
self.ObsSpec_GroupBox.setLayout(ObsSpecLayout)
SpecPlotLayout = QtGui.QGridLayout()
SpecPlotLayout.addWidget(QtGui.QLabel('xmin'),0,0)
SpecPlotLayout.addWidget(QtGui.QLabel('xmax'),1,0)
SpecPlotLayout.addWidget(QtGui.QLabel('ymin'),0,2)
SpecPlotLayout.addWidget(QtGui.QLabel('ymax'),1,2)
SpecPlotLayout.addWidget(self.xlim_min_box,0,1)
SpecPlotLayout.addWidget(self.xlim_max_box,1,1)
SpecPlotLayout.addWidget(self.y1lim_min_box,0,3)
SpecPlotLayout.addWidget(self.y1lim_max_box,1,3)
SpecPlotLayout.addWidget(self.fix_axes_cb,0,4)
self.SpecPlot_GroupBox.setLayout(SpecPlotLayout)
LineIDLayout = QtGui.QGridLayout()
LineIDLayout.addWidget(QtGui.QLabel('cut'),0,0)
LineIDLayout.addWidget(self.cut2_box,0,1)
LineIDLayout.addWidget(self.cut_cb,0,2)
LineIDLayout.addWidget(QtGui.QLabel('ion'),1,0)
LineIDLayout.addWidget(self.ion_box,1,1)
LineIDLayout.addWidget(self.ion_cb,1,2)
self.lineIDs_GroupBox.setLayout(LineIDLayout)
ResidualLayout = QtGui.QGridLayout()
ResidualLayout.addWidget(QtGui.QLabel('ymin'),0,0)
ResidualLayout.addWidget(QtGui.QLabel('ymax'),1,0)
ResidualLayout.addWidget(self.y3lim_min_box,0,1)
ResidualLayout.addWidget(self.y3lim_max_box,1,1)
self.residual_GroupBox.setLayout(ResidualLayout)
grid = QtGui.QGridLayout()
grid.addWidget(self.Command_GroupBox, 0, 1 )
grid.addWidget(self.ObsSpec_GroupBox, 0, 2 )
grid.addWidget(self.SpecPlot_GroupBox, 0, 3 )
grid.addWidget(self.residual_GroupBox, 0, 4 )
grid.addWidget(self.lineIDs_GroupBox, 0, 5 )
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(grid)
#vbox.setAlignment(QtCore.Qt.AlignBottom)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
    """Install a permanent label in the status bar showing the program version."""
    version_label = QtGui.QLabel("pySSN, v{}".format(__version__))
    self.status_text = version_label
    self.statusBar().addWidget(version_label, 1)
def create_menu(self):
    """Build the menu bar: File, Execute, Lines, Continuum, Settings, Help.

    Actions are created through self.create_action (which also connects
    slots); self.add_actions inserts them into a menu, with None meaning
    a separator.
    """
    # --- File menu -------------------------------------------------------
    self.file_menu = self.menuBar().addMenu("File")
    open_init_action = self.create_action("Open init file",
        shortcut="",
        slot=self.select_init,
        tip="Open the initialization file and run the synthesis")
    # NOTE(review): 'Save parameters' and 'Save parameters as' share the
    # same slot (save_pars_as); likewise the two plot/lines save pairs
    # below. Possibly intentional (see commented-out add_actions call),
    # but worth confirming.
    save_pars_action = self.create_action("Save parameters",
        shortcut="Ctrl+S",
        slot=self.save_pars_as,
        tip="Save synthesis and plot parameters to file")
    save_pars_as_action = self.create_action("Save parameters as",
        shortcut="Ctrl+Shift+S",
        slot=self.save_pars_as,
        tip="Select file name and save parameters of the synthesis")
    self.save_plot_action = self.create_action("Save plot",
        shortcut="Ctrl+P",
        slot=self.save_plot_as,
        tip="Save plot to file")
    save_plot_as_action = self.create_action("Save plot as",
        shortcut="Ctrl+Shift+P",
        slot=self.save_plot_as,
        tip="Select file name and save plot")
    save_lines_action = self.create_action("Save lines",
        shortcut="Ctrl+L",
        slot=self.save_lines_as,
        tip="Save list of lines to file")
    save_lines_as_action = self.create_action("Save lines as",
        shortcut="Ctrl+Shift+L",
        slot=self.save_lines_as,
        tip="Select file name and save list of lines")
    self.add_actions(self.file_menu,
        (open_init_action, save_pars_action, None, self.save_plot_action, None, save_lines_action))
    #(open_init_action, save_pars_action, save_pars_as_action, None, self.save_plot_action, save_plot_as_action, None, save_lines_action, save_lines_as_action))
    # "Sort lines by" radio-style submenu (exclusive action group).
    self.line_sort_list = ['wavelength', 'decreasing wavelength', 'intensity', 'decreasing intensity', 'ion' , 'decreasing ion' ]
    s = 'Sort lines by:\n'
    for i in range(len(self.line_sort_list)):
        s = s + ' ' + str(i) + ' - ' + self.line_sort_list[i] + '\n'
    s = s + '\nSet with:\n' + ' save_lines_sort = <integer>'
    # NOTE(review): the tooltip text s built above is never assigned
    # (the *_ToolTip attribute is set to '') — possibly a placeholder.
    # The same pattern repeats for the other submenus below.
    self.line_sort_ag = QtGui.QActionGroup(self, exclusive=True)
    self.line_sort_menu = self.file_menu.addMenu("Sort lines by")
    self.line_sort_menu_ToolTip = ''
    for i in range(len(self.line_sort_list)):
        a = self.line_sort_ag.addAction(QtGui.QAction(self.line_sort_list[i], self, checkable=True))
        self.line_sort_menu.addAction(a)
    self.line_sort_ag.triggered.connect(self.line_sort)
    # Mapping of line-record field keys to human-readable labels, used for
    # the "Show fields" checkable submenu.
    self.line_print_dic = OrderedDict( [
        ( 'num' , 'line number' ),
        ( 'id' , 'ion' ),
        ( 'lambda' , 'wavelength' ),
        ( 'l_shift' , 'wavelength shift' ),
        ( 'l_tot' , 'corrected wavelength' ),
        ( 'i_rel' , 'intensity' ),
        ( 'i_cor' , 'intensity correction factor' ),
        ( 'i_tot' , 'corrected intensity' ),
        ( 'ref' , 'reference line number' ),
        ( 'profile' , 'line profile code number' ),
        ( 'vitesse' , 'natural line width' ),
        ( 'comment' , 'comment' ) ])
    items = list(self.line_print_dic.values())
    s = 'Fields to be printed:\n'
    for i in range(len(items)):
        s = s + ' ' + str(i) + ' - ' + items[i] + '\n'
    s = s + '\nSet with:\n' + ' save_lines_fields = <list>'
    self.line_field_menu = self.file_menu.addMenu("Show fields")
    self.line_field_menu_ToolTip = ''
    for i in range(len(items)):
        a = self.create_action(items[i],
            shortcut='', slot=self.set_line_fields_to_print, checkable=True,
            tip=None)
        self.line_field_menu.addAction(a)
    self.file_menu.addMenu(self.line_field_menu)
    self.show_header_action = self.create_action("Show header",
        slot=self.set_show_header,
        shortcut="",
        checkable=True,
        tip="Show header in list of lines")
    self.file_menu.addAction(self.show_header_action)
    # Cosmetic-file maintenance actions.
    self.open_cosmetic_file_action = self.create_action("Open cosmetic file",
        slot=self.set_cosmetic_file,
        shortcut="",
        tip="Open the cosmetic file")
    self.clean_cosmetic_file_action = self.create_action("Clean cosmetic file",
        slot=self.clean_cosmetic_file,
        shortcut="",
        tip="Remove the unchanged lines from the cosmetic file")
    self.empty_cosmetic_file_action = self.create_action("Empty cosmetic file",
        slot=self.empty_cosmetic_file,
        shortcut="",
        tip="Remove all lines from the cosmetic file")
    self.order_cosmetic_file_action = self.create_action("Order cosmetic file",
        slot=self.order_cosmetic_file,
        shortcut="",
        tip="Order the cosmetic file by line number and remove duplicate lines")
    quit_action = self.create_action("&Quit",
        slot=self.fileQuit,
        shortcut="Ctrl+Q",
        tip="Close the application")
    self.add_actions(self.file_menu, (None, self.open_cosmetic_file_action, self.clean_cosmetic_file_action,
        self.order_cosmetic_file_action, self.empty_cosmetic_file_action, None, quit_action))
    # --- Execute menu ----------------------------------------------------
    self.run_menu = self.menuBar().addMenu("Execute")
    run_action = self.create_action("Run",
        shortcut="Ctrl+F9",
        slot=self.rerun,
        tip="Execute synthesis from the beginning")
    update_action = self.create_action("Update",
        shortcut="F9",
        slot=self.adjust,
        tip="Update synthesis with changes in line intensities, profiles, and continuum parameters")
    draw_action = self.create_action("Draw",
        shortcut="F8",
        slot=self.set_plot_limits_and_draw,
        tip="Redraw plots")
    post_proc_action = self.create_action("Post-process",
        shortcut="Ctrl+F8",
        slot=self.apply_post_proc,
        tip="Edit the plots with python commands defined in an external file")
    open_profile_action = self.create_action("Instrumental profile",
        shortcut="F7",
        slot=self.apply_instr_prof,
        tip="Open the instrumental profile file and run the synthesis")
    refine_wavelengths_action = self.create_action("Wavelength-refining",
        slot=self.refine_wavelengths,
        shortcut="F6",
        tip="Refine the wavelength calibration")
    self.add_actions(self.run_menu, (update_action, run_action, draw_action, None,
        post_proc_action, open_profile_action, refine_wavelengths_action))
    # --- Lines menu ------------------------------------------------------
    self.line_menu = self.menuBar().addMenu('Lines')
    self.show_line_ticks_action = self.create_action('Plot line ticks',
        shortcut='Alt+L', slot=self.show_line_ticks_action_clicked, checkable=True,
        tip='Check to show line ticks')
    self.plot_lines_action = self.create_action('Plot spectra of selected ions',
        shortcut='Alt+P', slot=self.show_line_ticks_action_clicked, checkable=True,
        tip='Check to plot spectra of selected ions')
    self.selected_intensities_action = self.create_action('Only above the cut',
        shortcut='Alt+K', slot=self.selected_lines_clicked, checkable=True,
        tip='Check to show the ticks for lines with intensities above cut only')
    self.selected_ions_action = self.create_action('Only for selected ions',
        shortcut='Alt+I', slot=self.selected_lines_clicked, checkable=True,
        tip='Check to show the line ticks for selected ions only')
    self.add_actions(self.line_menu,
        (self.plot_lines_action, None, self.show_line_ticks_action, self.selected_intensities_action, self.selected_ions_action))
    self.diff_lines_list = ['ion and reference line', 'ion and process', 'ion', 'element' ]
    s = 'Differentiate lines by:\n'
    for i in range(len(self.diff_lines_list)):
        s = s + ' ' + str(i) + ' - ' + self.diff_lines_list[i] + '\n'
    s = s + '\nSet with:\n' + ' diff_lines_by = <integer>'
    self.diff_lines_ag = QtGui.QActionGroup(self, exclusive=True)
    self.diff_lines_menu = self.line_menu.addMenu("Differentiate lines by")
    self.diff_lines_menu_ToolTip = ''
    for i in range(len(self.diff_lines_list)):
        a = self.diff_lines_ag.addAction(QtGui.QAction(self.diff_lines_list[i], self, checkable=True))
        a.setShortcut('Alt+' + str(i+1))
        self.diff_lines_menu.addAction(a)
    self.diff_lines_ag.triggered.connect(self.diff_lines)
    self.cycle_forwards_ions_action = self.create_action('Cycle forwards selected ions',
        shortcut='Alt+0', slot=self.cycle_forwards_ions, checkable=False,
        tip='Click to cycle forwards the selected ions')
    # NOTE(review): this assignment rebinds self.cycle_backwards_ions
    # (the bound method passed as slot on the same line) to the QAction,
    # shadowing the method afterwards. The connection still works because
    # the slot reference is captured before the rebinding, but a distinct
    # attribute name (e.g. cycle_backwards_ions_action, matching the
    # forwards case) would be safer — confirm no external code relies on
    # the current name before renaming.
    self.cycle_backwards_ions = self.create_action('Cycle backwards selected ions',
        shortcut='Alt+9', slot=self.cycle_backwards_ions, checkable=False,
        tip='Click to cycle backwards the selected ions')
    self.add_actions(self.line_menu,
        (None, self.cycle_forwards_ions_action, self.cycle_backwards_ions, None))
    self.line_tick_ax_menu = self.line_menu.addMenu('Window of line ticks')
    self.line_tick_ax_list = ['Plot of spectra', 'Plot of residuals', 'Separate plot' ]
    s = 'Show line ticks on:\n'
    for i in range(len(self.line_tick_ax_list)):
        s = s + ' ' + str(i) + ' - ' + self.line_tick_ax_list[i] + '\n'
    s = s + '\nSet with:\n' + ' line_tick_ax = <integer>'
    self.line_tick_ax_ag = QtGui.QActionGroup(self, exclusive=True)
    self.line_tick_ax_menu_ToolTip = ''
    for i in range(len(self.line_tick_ax_list)):
        a = self.line_tick_ax_ag.addAction(QtGui.QAction(self.line_tick_ax_list[i], self, checkable=True))
        self.line_tick_ax_menu.addAction(a)
    self.line_tick_ax_ag.triggered.connect(self.set_plot_ax2)
    self.line_tick_pos_menu = self.line_menu.addMenu('Position of line ticks')
    self.line_tick_pos_list = ['Top', 'Middle', 'Bottom' ]
    s = 'Position line ticks:\n'
    for i in range(len(self.line_tick_pos_list)):
        s = s + ' ' + str(i) + ' - ' + self.line_tick_pos_list[i] + '\n'
    s = s + '\nSet with:\n' + ' line_tick_pos = <integer>'
    self.line_tick_pos_ag = QtGui.QActionGroup(self, exclusive=True)
    self.line_tick_pos_menu_ToolTip = ''
    for i in range(len(self.line_tick_pos_list)):
        a = self.line_tick_pos_ag.addAction(QtGui.QAction(self.line_tick_pos_list[i], self, checkable=True))
        self.line_tick_pos_menu.addAction(a)
    self.line_tick_pos_ag.triggered.connect(self.set_plot_ax2)
    self.line_tick_color_action = self.create_action('Color of line ticks',
        shortcut=None, slot=self.line_tick_color_clicked, checkable=False,
        tip='Set color of line ticks')
    self.toggle_legend_action = self.create_action('Toggle legend position and zoom',
        shortcut='Alt+Shift+L', slot=self.toggle_legend_clicked, checkable=False,
        tip='Toggle the legend position and zoom')
    self.line_menu.addAction(self.toggle_legend_action)
    # editing_lines_action / update_lines_action are created here but
    # inserted into the Settings menu further below.
    self.editing_lines_action = self.create_action('Allow editing line parameters',
        slot=self.editing_lines_clicked, checkable=True,
        tip='Check to allow editing line parameters in line info dialog')
    self.update_lines_action = self.create_action('Update after editing line parameters',
        shortcut='Alt+U', slot=self.update_lines_clicked, checkable=True,
        tip='Check to update synthesis after editing line parameters in line info dialog')
    self.show_line_ticks_from_file_action = self.create_action('Plot line ticks from file',
        shortcut='F4', slot=self.show_line_ticks_from_file,
        tip='Check to show line ticks defined in an external file')
    # NOTE(review): ask_tickfile_action is created but never added to a
    # menu in this method — confirm whether that is intentional.
    self.ask_tickfile_action = self.create_action("Ask for file name",
        checkable=True, tip="Check to be always asked for the text file containing a list of wavelengths to be ticked")
    self.add_actions(self.line_menu, (None, self.show_line_ticks_from_file_action))
    # --- Continuum menu --------------------------------------------------
    self.cont_menu = self.menuBar().addMenu('Continuum')
    self.plot_cont_action = self.create_action('Plot continuum',
        shortcut="Alt+C",
        slot=self.plot_cont_action_clicked,
        checkable=True,
        tip='Check to plot the different components of the continuum spectrum')
    self.cont_action = self.create_action('Parameters',
        shortcut="Shift+Alt+C",
        slot=self.cont_dialog,
        tip='Parameters of the continuum spectrum')
    self.interpol_cont_action = self.create_action('User-defined continuum',
        shortcut="F5",
        slot=self.user_continuum,
        tip='Open dialog to set the user-defined continuum spectrum')
    self.add_actions(self.cont_menu,
        (self.plot_cont_action, self.cont_action, self.interpol_cont_action,))
    # --- Settings menu ---------------------------------------------------
    self.settings_menu = self.menuBar().addMenu('Settings')
    self.verbosity_list = ['None', 'Errors', 'Errors and warnings', 'Errors, warnings, and comments', 'Debug messages' ]
    s = 'Verbosity level:\n'
    for i in range(len(self.verbosity_list)):
        s = s + ' ' + str(i) + ' - ' + self.verbosity_list[i] + '\n'
    s = s + '\nSet with:\n' + ' log_level = <integer>'
    self.verbosity_ag = QtGui.QActionGroup(self, exclusive=True)
    #self.verbosity_menu = self.menuBar().addMenu("Verbosity")
    self.verbosity_menu = self.settings_menu.addMenu("Verbosity")
    self.verbosity_menu_ToolTip = ''
    for i in range(len(self.verbosity_list)):
        a = self.verbosity_ag.addAction(QtGui.QAction(self.verbosity_list[i], self, checkable=True))
        self.verbosity_menu.addAction(a)
    self.verbosity_ag.triggered.connect(self.verbosity)
    # Qt widget-style selector, populated from the styles available at runtime.
    self.style_list = list(QtGui.QStyleFactory.keys())
    s = 'Widget styles:\n'
    for i in range(len(self.style_list)):
        s = s + ' ' + str(i) + ' - ' + self.style_list[i] + '\n'
    s = s + '\nSet with:\n' + ' qt_style = <integer>'
    self.style_ag = QtGui.QActionGroup(self, exclusive=True)
    self.style_menu = self.settings_menu.addMenu('Widget style')
    self.style_menu_ToolTip = ''
    for i in range(len(self.style_list)):
        a = self.style_ag.addAction(QtGui.QAction(self.style_list[i], self, checkable=True))
        self.style_menu.addAction(a)
    self.style_ag.triggered.connect(self.style)
    self.enable_tooltips_action = self.create_action('Enable tooltips',
        slot=self.enable_tooltips_action_clicked, checkable=True,
        tip='Check to enable tooltips')
    self.adjust_fig_action = self.create_action('Adjust figure',
        slot=self.adjust_fig_action_clicked, checkable=True,
        tip='Automatically adjust figure to avoid overlaps and to minimize the empty borders.')
    self.show_uncor_obs_action = self.create_action('Show uncorrected spectrum',
        slot=self.show_uncor_obs_action_clicked, checkable=True,
        tip='Show observational spectrum without the wavelength refining.')
    self.add_actions(self.settings_menu,
        (None, self.enable_tooltips_action, self.adjust_fig_action, None, self.editing_lines_action, self.update_lines_action, self.show_uncor_obs_action))
    # --- Help menu -------------------------------------------------------
    self.help_menu = self.menuBar().addMenu("&Help")
    about_action = self.create_action("&About",
        shortcut='F1', slot=self.on_about,
        tip='About the demo')
    self.add_actions(self.help_menu, (about_action,))
def fileQuit(self):
    """Close the main window (slot for the File > Quit action)."""
    self.close()
def add_actions(self, target, actions):
    """Append each action in actions to target; a None entry inserts a separator."""
    for entry in actions:
        if entry is None:
            target.addSeparator()
            continue
        target.addAction(entry)
def create_action( self, text, slot=None, shortcut=None,
                   icon=None, tip=None, checkable=False,
                   signal="triggered()"):
    """Build a QAction owned by self.

    Optionally attaches an icon (loaded from the ':/<icon>.png' resource),
    a keyboard shortcut, a tooltip/status-tip pair, checkability, and a
    slot connected to the given Qt signal. Returns the configured action.
    """
    act = QtGui.QAction(text, self)
    if checkable:
        act.setCheckable(True)
    if icon is not None:
        act.setIcon(QtGui.QIcon(":/%s.png" % icon))
    if shortcut is not None:
        act.setShortcut(shortcut)
    if tip is not None:
        act.setToolTip(tip)
        act.setStatusTip(tip)
    if slot is not None:
        self.connect(act, QtCore.SIGNAL(signal), slot)
    return act
def isInteger(self, str_):
    """Return True if str_ can be parsed as a base-10 integer."""
    try:
        int(str_)
    except ValueError:
        return False
    return True
def isPositiveInteger(self, str_):
    """Return True if str_ parses as an integer strictly greater than zero."""
    if not self.isInteger(str_):
        return False
    return int(str_) > 0
def isPositiveOdd(self, str_):
    """Return True if str_ parses as a positive odd integer."""
    if not self.isInteger(str_):
        return False
    n = int(str_)
    return n > 0 and n % 2 == 1
def isFloat(self, str_):
    """Return True if str_ can be parsed as a float.

    Uses the builtin float: np.float was only an alias for it and has
    been removed from NumPy (deprecated 1.20, removed 1.24), so the old
    call raises AttributeError on modern NumPy.
    """
    try:
        float(str_)
        return True
    except ValueError:
        return False
def floatFixFormat(self, r, fix_fmt, align='>'):
    """Render r in the fixed width implied by fix_fmt (e.g. '{:7.3f}'),
    switching to a compact scientific notation when the plain fixed-point
    form would not fit.

    Returns None when fix_fmt is not a fixed-point ('f') format or when
    r is not float-like.

    Examples kept from the original author (not re-verified here):
      floatFixFormat(1.23456789, '{:7.3f}')     = ' 1.234'
      floatFixFormat(-1.23456789, '{:7.3f}')    = ' -1.234'
      floatFixFormat(123.456789, '{:7.3f}')     = ' 1.23e2'
      floatFixFormat(-123.456789, '{:7.3f}')    = '-1.23e2'
      floatFixFormat(1.23456789e+04, '{:7.3f}') = ' 1.23e4'
      floatFixFormat(1.23456789e-04, '{:7.3f}') = ' 1.2e-4'
      floatFixFormat(1.23456789e+34, '{:7.3f}') = ' 1.2e34'
      floatFixFormat(99.999999, '{:7.3f}')      = ' 1.2e34'
        (this last expected value looks like a copy-paste slip — TODO confirm)
    """
    if not ( 'f' in fix_fmt and self.isFloat(r) ):
        return None
    # Extract width w and precision p from the '{:w.pf}' format string.
    s = fix_fmt.strip('{')
    s = s.strip('}')
    s = s.strip(':')
    s = s.strip('f')
    k = s.index('.')
    w = int(s[:k])
    p = int(s[k+1:])
    # s0: plain fixed-point rendering of |r| (one column reserved for the sign).
    s0 = '{:{align}{w}.{p}f}'.format(float(abs(r)), w=w-1, p=p, align=align)
    # s: scientific rendering of |r|, used to read off the exponent.
    s = '{:0.{w}e}'.format(float(abs(r)), w=w)
    if r < 0:
        sgn = '-'
    else:
        sgn = ''
    k = s.index('e')
    mantissa = s[:k]
    mantissa = mantissa[:p+2]  # NOTE(review): this value is unused in the branch below
    e = int(s[k+1:])
    # Use the fixed-point form when it fits the width; otherwise rebuild a
    # scientific form with precision trimmed to honour the total width.
    if p+e+2>w-3-len(str(e)) and len(s0) < w:
        s = s0.strip()
    else:
        s = '{:0.{p}e}'.format(float(abs(r)), p=min(p,w-4-len(str(e))))
        k = s.index('e')
        mantissa = s[:k]
        exponent = str(int(s[k+1:]))  # int() drops the zero padding of e.g. 'e+07'
        s = mantissa + 'e' + exponent
    # Re-attach the sign and pad/align to the requested width.
    s = '{:{align}{w}}'.format(sgn+s, w=w, align=align)
    return s
def rightFormat(self, s, field):
    """Validate and format the string s for the given line-table field.

    'comment' fields are simply stripped. Numeric fields are parsed
    ('profile' as int, everything else as float) and rendered with the
    field's format from self.sp.field_format; the result is accepted only
    if it exactly fills self.sp.field_width[field] and the value is
    finite. 'vitesse' must additionally be non-negative and non-zero at
    the displayed precision. Returns the formatted string, or None when
    the value is invalid or not representable.
    """
    if field == 'comment':
        return s.strip()
    try:
        if field == 'profile':
            r = int(s)
        else:
            # builtin float; np.float was a removed alias of it
            r = float(s)
        fmt = self.sp.field_format[field]
        if 'f' in fmt:
            # floatFixFormat may return None; len(None) below then raises
            # TypeError, which the except turns into a None result.
            s = self.floatFixFormat(r, fmt)
        else:
            s = fmt.format(r)
        if len(s) == self.sp.field_width[field] and not np.isinf(r):
            if field == 'vitesse' and (r < 0 or s.strip() == '0.00'):
                output = None
            else:
                output = s
        else:
            output = None
    except Exception:
        # narrowed from a bare 'except:'; any parse/format failure means
        # the input is not representable in this field
        output = None
    return output
def ConvStrToValidTypes(self, str_):
    """Convert a parameter string from the GUI into a Python value.

    After dropping any 'Error in ' prefix and all spaces, tries in order:
    None (empty string), int, float, bool, list of (float, float) tuples,
    list of floats; otherwise returns the cleaned string unchanged.
    Malformed tuple/list syntax yields None.
    """
    str_ = str_.replace('Error in ','')
    str_ = str_.replace(' ','')
    if str_ == '':
        result = None
    elif str_.isdigit():
        result = int(str_)
    elif self.isFloat(str_):
        # builtin float; np.float was a removed alias of it
        result = float(str_)
    elif str_.capitalize() == 'True':
        result = True
    elif str_.capitalize() == 'False':
        result = False
    elif str_.find('(') >= 0:
        # e.g. '(1,2),(3,4)' -> [(1.0, 2.0), (3.0, 4.0)]
        try:
            str_ = str_.replace('[','')
            str_ = str_.replace(']','')
            str_ = str_.strip('[]()')
            result = [(float(s.split(',')[0]), float(s.split(',')[1]))
                      for s in str_.split('),(')]
        except Exception:
            result = None
    elif str_.find(',') >= 0:
        # e.g. '1, 2, 3' -> [1.0, 2.0, 3.0]
        try:
            str_ = str_.replace('[','')
            str_ = str_.replace(']','')
            result = [float(i) for i in str_.split(',')]
        except Exception:
            result = None
    else:
        result = str_
    return result
def save_par_in_file(self, field, value, path, help_=None):
    """Write or update a single 'field = value' assignment in the file at path.

    If the file already contains a line starting with field, the old line
    is kept as a comment and the new assignment written after it (string
    values are quoted); otherwise the assignment, preceded by an optional
    help_ comment, is appended at the end of the file.

    NOTE(review): value is concatenated with '+', so callers must pass it
    already converted to a string — TODO confirm all call sites do.
    """
    if not self.isValidFilename(path):
        return
    if os.path.isfile(path):
        # Work on the reversed line list so the setting nearest the end of
        # the file (the most recent one) is the one that gets updated.
        with open(path, 'r') as f:
            lines = f.readlines()[::-1]
    else:
        lines = []
    j = 0
    found = False
    while ( j < len(lines) ) and ( not found ):
        line = str(lines[j])
        if line.find(field) == 0:
            # String values are re-written quoted; others verbatim.
            if type(value) is str:
                s0 = ' = \''
                s1 = '\'\n'
            else:
                s0 = ' = '
                s1 = '\n'
            # Keep the superseded assignment as a comment, then append
            # the new one (both stored in the same list entry).
            line = '# ' + line + field + s0 + value + s1
            lines[j] = line
            found = True
            break
        j += 1
    if not found:
        if help_ is not None:
            lines.insert(0, '\n# ' + help_ + '\n')
        lines.insert(0, field + ' = ' + value + '\n')
    lines = lines[::-1]
    with open(path, 'w') as f:
        f.writelines(lines)
def save_cont_pars(self):
    """Ask for a file name and save the continuum parameters shown in
    self.table (field, value, help columns) to that file.

    Existing assignments in the file are overwritten in place (keeping
    any trailing '#' comment); new fields are appended together with
    their help text as a comment.

    NOTE(review): Python-2-only constructs (`unicode`, QString.toUtf8)
    — this method needs porting before it can run on Python 3.
    """
    file_choices = "Python files (*.py) (*.py);;Text files (*.txt *.dat) (*.txt *.dat);;All Files (*) (*)"
    filename = self.sp.config_file.split('/')[-1]
    path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save to file', filename, file_choices))
    if path:
        if os.path.isfile(path):
            # Reversed so the entry nearest the end of the file is updated.
            f = open(path, 'r')
            lines = f.readlines()[::-1]
            f.close()
        else:
            lines = []
        for i in range(0, self.table.rowCount()):
            field = str(self.table.item(i,0).text())
            value = str(self.table.item(i,1).text())
            help_ = str(self.table.item(i,2).text().toUtf8())
            # Replace UTF-8 byte sequences (Greek letters, angstrom sign)
            # with ASCII equivalents so the file stays plain ASCII.
            help_ = help_.replace('\xce\xb2', 'beta')
            help_ = help_.replace('\xe2\x81\xbb\xc2\xb3', '-3')
            help_ = help_.replace('\xce\xb1', 'alpha')
            help_ = help_.replace('\xce\xbb/5000 \xe2\x84\xab', 'lambda/5000 A')
            j = 0
            found = False
            while ( j < len(lines) ) and ( not found ):
                line = str(lines[j])
                if line.find(field) == 0:
                    # Preserve an existing trailing comment, if any.
                    k = line.find('#')
                    if k > 0:
                        comment = ' ' + line[k:]
                    else:
                        comment = '\n'
                    line = field + ' = ' + value + comment
                    lines[j] = line
                    found = True
                    break
                j += 1
            if not found:
                lines.insert(0, '\n# ' + help_ + '\n')
                lines.insert(0, field + ' = ' + value + '\n')
        lines = lines[::-1]
        f = open(path, 'w')
        f.writelines(lines)
        f.close()
def get_shifts_from_profile(self, profile_key):
    """Return (shift_list, vel) for the emission profile profile_key.

    Falls back to profile '1' when the key is not defined in
    self.sp.emis_profiles. shift_list contains the sorted shifts of the
    Gaussian ('G') components whose intensity exceeds 0.2; vel is the
    profile's 'vel' entry.
    """
    if profile_key not in self.sp.emis_profiles:
        profile_key = '1'
    profile = self.sp.emis_profiles[profile_key]
    vel = profile['vel']
    shift_list = []
    for item in profile['params']:
        # item layout as used here: (component type, intensity, shift).
        # builtin float; np.float was a removed alias of it
        shift = float(item[2])
        intensity = float(item[1])
        if item[0] == 'G' and ( intensity > 0.2 ):
            shift_list.append(shift)
    shift_list.sort()
    return shift_list, vel
def plot_tick_at(self, wavelength, ion, line_num):
    """Draw a green marker tick at wavelength for the given ion/line,
    plus dashed ticks for the individual profile components, and
    optionally add a legend entry.

    NOTE(review): np.int is a removed NumPy alias of the builtin int
    (gone since NumPy 1.24); this call fails on modern NumPy.
    """
    # Redraw first to clear any previously shown green tick.
    if self.green_tick_shown:
        self.on_draw()
    color = 'green'
    ion = ion.replace('_',' ').strip()
    to_select = (self.sp.liste_raies['num'] == np.int(line_num))
    vitesse = self.sp.liste_raies[to_select]['vitesse']
    profile_key = str(self.sp.liste_raies[to_select]['profile'][0])
    shift_list, vel = self.get_shifts_from_profile(profile_key)
    line_num = line_num.strip().strip('0')
    # label = ion + ' (' + line_num.strip() + ')'
    label = ion + ' {:.2f}'.format(wavelength)
    posTick = self.getTickPosOfSelectedLine()
    y1, y2 = self.get_line_tick_lim(posTick)
    # k selects the target axes: 1 = residual plot (only if shown), else 0.
    k = self.sp.get_conf('line_tick_ax')
    if not (k == 1 and self.residual_GroupBox.isChecked()):
        k = 0
    if len(shift_list) > 0:
        # Component ticks (ys1..ys2) are drawn adjacent to the main tick:
        # below it when the tick is at the top, above it otherwise.
        if posTick == 0:
            ys1 = 2*y1-y2
            ys2 = y1
            ym = y1
        else:
            ys1 = y2
            ys2 = 2*y2-y1
            ym = y2
        # yy1: data-coordinate height for the horizontal connector line.
        if k == 0:
            yy1 = self.y1_plot_lims[0] + ym*(self.y1_plot_lims[1] - self.y1_plot_lims[0])
        else:
            yy1 = self.y3_plot_lims[0] + ym*(self.y3_plot_lims[1] - self.y3_plot_lims[0])
    # Move the legend away from the tick when the tick falls near either
    # edge of the plotted wavelength range.
    current_legend_loc = self.sp.legend_loc
    f = 0.15
    r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
    if wavelength - self.x_plot_lims[0] < 2*r*f:
        current_legend_loc = 1
    if self.x_plot_lims[1] - wavelength < 2*r*f:
        current_legend_loc = 2
    self.fig.axes[k].axvline( wavelength, y1, y2, color = color, linestyle = 'solid', linewidth = 2.5 )
    # Doppler shift of each profile component, in wavelength units.
    wave_shifts = -vitesse*wavelength*shift_list / CST.CLIGHT * 1e5 + wavelength*vel / CST.CLIGHT * 1e5
    if len(wave_shifts) > 0:
        max_wave_shift = max(abs(wave_shifts))
    else:
        max_wave_shift = 0
    # Ticks for the profiles components are not shown if they are within 1000*f percent of the x-axis width.
    f = 0.001
    if max_wave_shift > f*(self.x_plot_lims[1] - self.x_plot_lims[0]):
        x1 = (wavelength - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
        for shift in wave_shifts:
            self.fig.axes[k].axvline( wavelength+shift, ys1, ys2, color = color, linestyle = '--', linewidth = 2.5 )
            x2 = (wavelength + shift - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
            self.fig.axes[k].axhline( yy1, x1, x2, color = color, linestyle = '-', linewidth = 1.0 )
    if self.addGreenTickToLegend:
        # Off-screen step line used purely to create a legend entry.
        self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
        self.fig.axes[k].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
    self.fig.canvas.draw()
    self.green_tick_shown = True
    self.magenta_tick_shown = False
    def show_line_info_dialog(self):
        """Build and show the non-modal 'line info' dialog.

        The dialog displays one emission line plus its subreference lines,
        reference line, and satellite lines in a QTableWidget, and lets the
        user edit the green (editable) cells, which are written back to the
        cosmetic or model file.  All behavior is implemented in the nested
        closures below, which share the local names defined near the end of
        this method (fieldItems, col_*, do_cosmetics, statusBar).
        """
        def get_window_size_and_position():
            # First call: derive a default geometry from the font width of 120 '='
            # characters and dock at the top-right of the screen.  Later calls
            # reuse the current dialog geometry so reopening keeps the placement.
            if self.line_info_dialog is None:
                font = QtGui.QFont()
                width = QtGui.QFontMetrics(font).width('='*120)
                self.line_info_dialog_width = width
                self.line_info_dialog_height = 470
                sG = QtGui.QApplication.desktop().screenGeometry()
                self.line_info_dialog_x = sG.width()-self.line_info_dialog_width
                self.line_info_dialog_y = 0
            else:
                self.line_info_dialog_width = self.line_info_dialog.width()
                self.line_info_dialog_height = self.line_info_dialog.height()
                self.line_info_dialog_x = self.line_info_dialog.pos().x()
                self.line_info_dialog_y = self.line_info_dialog.pos().y()
        def save_initial_plot_pars():
            # Snapshot the current line number, ion and plot-limit box contents
            # so do_reset()/redo_initial_plot() can restore them later.
            self.init_line_num = self.line_info_box.text()
            self.init_ion = self.ion_box.text()
            self.init_xmin = self.xlim_min_box.text()
            self.init_xmax = self.xlim_max_box.text()
            self.init_y1min = self.y1lim_min_box.text()
            self.init_y1max = self.y1lim_max_box.text()
            self.init_y3min = self.y3lim_min_box.text()
            self.init_y3max = self.y3lim_max_box.text()
            self.init_legend_fontsize = self.sp.legend_fontsize
            self.init_legend_loc = self.sp.legend_loc
        def toggle_statusbar():
            # The Help button toggles the in-dialog help text (a status bar).
            self.showStatusBar = not self.showStatusBar
            statusBar.setVisible(self.showStatusBar)
        def redo_initial_plot():
            # Restore the plot parameters saved by save_initial_plot_pars()
            # and redraw.
            self.line_info_box.setText(self.init_line_num)
            self.ion_box.setText(self.init_ion)
            self.xlim_min_box.setText(self.init_xmin)
            self.xlim_max_box.setText(self.init_xmax)
            self.y1lim_min_box.setText(self.init_y1min)
            self.y1lim_max_box.setText(self.init_y1max)
            self.y3lim_min_box.setText(self.init_y3min)
            self.y3lim_max_box.setText(self.init_y3max)
            self.sp.legend_fontsize = self.init_legend_fontsize
            self.sp.legend_loc = self.init_legend_loc
            self.set_plot_limits_and_draw()
            #self.save_from_lim_boxes()
            #self.draw_ion()
        def do_reset():
            # Reset button: back to the line shown when the dialog was opened.
            self.curr_line_num = self.init_line_num
            get_info(self.curr_line_num)
            fill_line_info_table()
            redo_initial_plot()
        def toggle_show_satellites():
            # Tri-state cycle 0 -> 1 -> 2 -> 0 (none / in-range / all);
            # see the help text below for the user-facing meaning.
            self.show_satellites = (self.show_satellites + 1)%3
            fill_line_info_table()
        def on_click():
            # NOTE(review): this closure duplicates on_doubleClick() and is
            # never connected to any signal in this method — apparently dead.
            item = self.line_info_table.currentItem()
            row = item.row()
            col = item.column()
            s = item.text()
            if col == col_ion:
                ion = self.line_info_table.item(row, col).text()
                self.ion_box.setText(ion)
                self.draw_ion()
            if not self.isFloat(s):
                return
            if col in [col_num, col_ref] and int(s) != 0:
                self.curr_line_num = s
                get_info(self.curr_line_num)
                self.line_info_box.setText(self.curr_line_num)
                fill_line_info_table()
        def on_doubleClick():
            # Double-click on an ion cell: plot that ion.  Double-click on a
            # line number or reference number: reload the dialog for that line.
            item = self.line_info_table.currentItem()
            row = item.row()
            col = item.column()
            s = item.text()
            if col == col_ion:
                ion = self.line_info_table.item(row, col).text()
                self.ion_box.setText(ion)
                self.draw_ion()
            if not self.isFloat(s):
                return
            if col in [col_num, col_ref] and int(s) != 0:
                self.curr_line_num = s
                get_info(self.curr_line_num)
                self.line_info_box.setText(self.curr_line_num)
                fill_line_info_table()
        def on_itemClicked():
            # to avoid blinking with itemSelectionChanged
            item = self.line_info_table.currentItem()
            if item == self.selected_item:
                on_itemSelectionChanged()
        def on_itemSelectionChanged():
            # Selecting a wavelength cell draws a tick there (recentring the
            # x-axis window if the wavelength falls near/outside its edges).
            # Selecting a wavelength equal to 1 (the placeholder used for
            # reference-like entries) plots ticks for all its satellites.
            if self.green_tick_shown:
                self.on_draw()
                self.green_tick_shown = False
            item = self.line_info_table.currentItem()
            if item == None:
                self.draw_ion()
                return
            self.selected_item = item
            row = item.row()
            col = item.column()
            s = item.text()
            l_shift_refline = np.float(self.sp.fieldStrFromLine(self.refline,'l_shift'))
            if col == col_wave:
                wavelength = np.float(s)
                ion = str(self.line_info_table.item(row, col_ion).text())
                line_num = str(self.line_info_table.item(row, col_num).text())
                max_wave = np.float(self.sp_max_box.text())
                min_wave = np.float(self.sp_min_box.text())
                if wavelength > min_wave and wavelength < max_wave:
                    # Apply the line's own shift plus the reference line's shift.
                    l_shift = np.float(self.line_info_table.item(row, col_lshift).text())
                    wavelength = wavelength + l_shift + l_shift_refline
                    r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
                    f = 0.05
                    # Recenter only when the tick would fall within 5% of (or
                    # beyond) either plot edge, clamping to the synthesis range.
                    if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
                        if wavelength-r < min_wave:
                            self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
                        elif wavelength+r > max_wave:
                            self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
                        else:
                            self.x_plot_lims = (wavelength-r,wavelength+r)
                        if not self.axes_fixed:
                            self.update_lim_boxes()
                        self.restore_axes()
                    self.plot_tick_at(wavelength, ion, line_num)
                elif wavelength == 1:
                    # Wavelength 1 marks a (sub)reference entry: show ticks for
                    # its satellites instead of a single tick.
                    if str(self.line_info_table.item(row, col_ref).text()) == '0000000000000':
                        satellites = self.satellites
                    else:
                        satellites = self.sp.read_satellites(self.sp.phyat_file, int(line_num))
                        satellites = add_satellites_of_subreferences(satellites)
                    SelectedSatellites = []
                    max_wave = np.float(self.sp_max_box.text())
                    min_wave = np.float(self.sp_min_box.text())
                    for i in range(0, len(satellites)):
                        wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
                        if (wavelength > min_wave) and (wavelength < max_wave):
                            SelectedSatellites.append(satellites[i])
                    satellites = SelectedSatellites
                    self.plot_line_ticks_for(satellites, ion, line_num, self.refline)
        def isRefLine(line):
            # A reference line has the all-zero 13-digit 'ref' field.
            s = self.sp.fieldStrFromLine(line,'ref').strip()
            if s == '0000000000000':
                return True
            else:
                return False
        def isSubRefLine(line):
            # A subreference is a non-reference entry whose placeholder
            # wavelength is < 2.0 (i.e. not a physical wavelength).
            wavelength = np.float(self.sp.fieldStrFromLine(line,'lambda'))
            if not isRefLine(line) and (wavelength < 2.0):
                return True
            else:
                return False
        def fill_data(i, line, cat=''):
            """Fill table row i with the fields of `line`.

            cat selects which columns are editable ('sat', 'subref' or 'ref');
            editing also requires the 'qt_allow_editing_lines' configuration,
            and for 'sat'/'subref' the cosmetic file must be in use.
            """
            if line == None:
                return
            editableCols = []
            if self.sp.get_conf('qt_allow_editing_lines', False):
                if cat == 'sat':
                    if do_cosmetics:
                        editableCols = ['l_shift', 'i_cor', 'profile', 'vitesse', 'comment']
                    else:
                        editableCols = []
                elif cat == 'subref':
                    if do_cosmetics:
                        editableCols = ['i_cor', 'comment']
                    else:
                        editableCols = []
                elif cat == 'ref':
                    editableCols = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse', 'comment']
            for j in range(0,len(fieldItems)):
                s = self.sp.fieldStrFromLine(line, fieldItems[j])
                s = s.strip()
                if j == col_ion:
                    if self.show_true_ions:
                        s = self.sp.true_ion(s).replace('_',' ').strip()
                    isPseudoIon = self.sp.isPseudoIon(s)
                # NOTE(review): this branch reads isPseudoIon, which is only
                # assigned when the ion column precedes 'proc' in fieldItems —
                # presumably guaranteed by the field order; confirm.
                if j == fieldItems.index('proc'):
                    if isRefLine(line):
                        s = ''
                    elif isPseudoIon:
                        s = ''
                    else:
                        s = self.sp.process[s]
                item = QtGui.QTableWidgetItem(s)
                if fieldItems[j] in editableCols:
                    item.setBackgroundColor(self.editableCells_bg_color)
                else:
                    item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
                    item.setBackgroundColor(self.readOnlyCells_bg_color)
                self.line_info_table.setItem(i,j,item)
        def fill_text(i, text):
            # Write a blue, read-only section header spanning the full table
            # width at row i (the span covers two rows).
            item = QtGui.QTableWidgetItem(text)
            item.setFlags(item.flags() ^ (QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled))
            item.setBackgroundColor(self.readOnlyCells_bg_color)
            item.setTextAlignment(QtCore.Qt.AlignBottom)
            item.setTextColor(QtCore.Qt.blue)
            self.line_info_table.setItem(i,0,item)
            self.line_info_table.setSpan(i,0,2,len(fieldItems))
        def add_satellites_of_subreferences(satellites):
            """Return `satellites` extended with the satellites of every
            subreference found in it, recursively (breadth-first over the
            growing subref_list)."""
            subref_list = []
            all_satellites = satellites
            for sat_line in satellites:
                if isSubRefLine(sat_line):
                    subref_list.append(sat_line)
            i = 0
            while i < len(subref_list):
                sat_line_num = self.sp.fieldStrFromLine(subref_list[i],'num')
                new_satellites = self.sp.read_satellites(self.sp.phyat_file, int(sat_line_num))
                for line in new_satellites:
                    if isSubRefLine(line):
                        subref_list.append(line)
                i += 1
                for line in new_satellites:
                    if not line in all_satellites:
                        all_satellites.append(line)
            return all_satellites
        def get_info(line_num):
            """Resolve line_num into the line itself, its chain of
            subreference lines, its reference line, and both satellite lists,
            storing everything on self for fill_line_info_table()."""
            line = None
            refline = None
            subrefline = None
            LineList = []
            if int(line_num) == 0:
                return
            # Walk up the 'ref' chain until the number is found in the model
            # file (i.e. until the true reference line is reached).
            # NOTE(review): if a 'ref' chain never reaches the model file this
            # loop does not terminate — presumably the data guarantees it.
            while refline == None:
                refline = self.sp.read_line(self.sp.fic_model, int(line_num))
                if refline is None:
                    if do_cosmetics:
                        curr_line = self.sp.read_line(self.sp.fic_cosmetik, int(line_num))
                    else:
                        curr_line = None
                    if self.sp.cosmetic_line_ok(curr_line) is not True:
                        curr_line = None
                    if curr_line == None:
                        curr_line = self.sp.read_line(self.sp.phyat_file, int(line_num))
                    LineList.append(curr_line)
                    line_num = self.sp.fieldStrFromLine(curr_line,'ref')
            if len(LineList) > 0:
                if isSubRefLine(LineList[0]):
                    subrefline = LineList[:1]
                else:
                    line = LineList[0]
                    if len(LineList) > 1:
                        subrefline = LineList[1:]
            if subrefline is not None:
                n_subref = len(subrefline)
            else:
                n_subref = 0
            subsatellites = []
            for k in range(0, n_subref):
                subsat = []
                subrefline_num = self.sp.fieldStrFromLine(subrefline[k], 'num')
                subsat = self.sp.read_satellites(self.sp.phyat_file, int(subrefline_num))
                n_subsat = len(subsat)
                # Prefer the cosmetic version of each satellite when available.
                if do_cosmetics:
                    for i in range(0,n_subsat):
                        sat_line = subsat[i]
                        sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
                        cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
                        if cosmetic_line is not None:
                            subsat[i] = cosmetic_line
                subsatellites = subsatellites + subsat
            subsatellites = add_satellites_of_subreferences(subsatellites)
            n_subsat = len(subsatellites)
            if refline is not None:
                refline_num = self.sp.fieldStrFromLine(refline,'num')
                satellites = self.sp.read_satellites(self.sp.phyat_file, int(refline_num))
                satellites = add_satellites_of_subreferences(satellites)
                n_sat = len(satellites)
                if do_cosmetics:
                    for i in range(0,n_sat):
                        sat_line = satellites[i]
                        sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
                        cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
                        if cosmetic_line is not None:
                            satellites[i] = cosmetic_line
            else:
                n_sat = 0
            if line is None and refline is None:
                title = 'Error in line info dialog'
                msg = 'Line number not found.'
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
            self.line = line
            self.subrefline = subrefline
            self.refline = refline
            self.subsatellites = subsatellites
            self.satellites = satellites
            self.n_sat = n_sat
            self.n_subsat = n_subsat
            self.n_subref = n_subref
        def do_sort(lines):
            # Sort lines by their 'lambda' field.
            # NOTE(review): fieldStrFromLine returns strings, so this sort is
            # lexicographic, not numeric — verify the field widths make the
            # two orders coincide.
            waves = []
            for i in range(0,len(lines)):
                waves.append(self.sp.fieldStrFromLine(lines[i], 'lambda'))
            lines = [x for _,x in sorted(zip(waves,lines))]
            return lines
        def fill_line_info_table():
            """(Re)populate the table from the self.* fields set by get_info(),
            applying the current satellite display mode (self.show_satellites:
            0 = none, 1 = in synthesis range, 2 = all)."""
            self.line_info_table.blockSignals(True)
            line = self.line
            subrefline = self.subrefline
            refline = self.refline
            subsatellites = self.subsatellites
            satellites = self.satellites
            n_sat = self.n_sat
            n_subsat = self.n_subsat
            n_subref = self.n_subref
            SelectedSatellites = []
            SelectedSubSatellites = []
            if self.show_satellites == 0:
                n_sat = 0
                n_subsat = 0
            else:
                max_wave = np.float(self.sp_max_box.text())
                min_wave = np.float(self.sp_min_box.text())
                for i in range(0, len(satellites)):
                    wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
                    if self.show_satellites == 2 or \
                       (self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
                        SelectedSatellites.append(satellites[i])
                for i in range(0, len(subsatellites)):
                    wavelength = np.float(self.sp.fieldStrFromLine(subsatellites[i],'lambda'))
                    if self.show_satellites == 2 or \
                       (self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
                        SelectedSubSatellites.append(subsatellites[i])
                n_sat = len(SelectedSatellites)
                n_subsat = len(SelectedSubSatellites)
            self.line_info_table.clearContents()
            # Generous initial row count; trimmed to the exact k below.
            self.line_info_table.setRowCount(n_sat+n_subsat+20)
            self.line_info_table.clearSpans()
            # k tracks the next free row; headers advance it by 2 (setSpan).
            k = 0
            sat_list = []
            if line is not None:
                fill_text(k,'Line:')
                k += 2
                fill_data(k, line, 'sat')
                k += 1
            if subrefline is not None:
                fill_text(k,'Subreference line:')
                k += 2
                for i in range(0,n_subref):
                    fill_data(k, subrefline[i], 'subref')
                    k += 1
                if n_subsat > 0:
                    SelectedSubSatellites = do_sort(SelectedSubSatellites)
                    fill_text(k, str(n_subsat) + ' satellites:')
                    sat_list.append([k,n_subsat])
                    k += 2
                    for i in range(0,n_subsat):
                        if isSubRefLine(SelectedSubSatellites[i]):
                            fill_data(k+i, SelectedSubSatellites[i], 'subref')
                        else:
                            fill_data(k+i, SelectedSubSatellites[i], 'sat')
                    k += n_subsat
            fill_text(k,'Reference line:')
            k += 2
            fill_data(k, refline, 'ref')
            k += 1
            if n_sat > 0:
                SelectedSatellites = do_sort(SelectedSatellites)
                fill_text(k, str(n_sat) + ' satellites:')
                sat_list.append([k,n_sat])
                k += 2
                for i in range(0,n_sat):
                    if isSubRefLine(SelectedSatellites[i]):
                        fill_data(k+i, SelectedSatellites[i], 'subref')
                    else:
                        fill_data(k+i, SelectedSatellites[i], 'sat')
                k += n_sat
            self.line_info_table.setRowCount(k)
            self.line_info_table.resizeColumnsToContents()
            self.line_info_table.resizeRowsToContents()
            self.line_info_table.blockSignals(False)
            self.line_info_table.blockSignals(True)
            # Rewrite the satellite headers with a suffix describing the
            # current display mode.
            if self.show_satellites == 1:
                s0 = ' (in the synthesis range)'
            elif self.show_satellites == 2:
                s0 = ' (in the entire database and including subreferences)'
            else:
                s0 = ''
            for i in sat_list:
                k = i[0]
                n = i[1]
                fill_text(k, str(n) + ' satellites:' + s0)
            self.line_info_table.blockSignals(False)
        def on_itemChanged():
            # Validate an edited cell; on success persist it via save_change(),
            # on failure flag the cell red, explain, and reload the table.
            self.line_info_table.blockSignals(True)
            item = self.line_info_table.currentItem()
            if not (item.flags() & QtCore.Qt.ItemIsEditable):
                self.line_info_table.blockSignals(False)
                return
            row = item.row()
            col = item.column()
            s = str(item.text())
            value = self.rightFormat(s, fieldItems[col])
            if value != None:
                self.line_info_table.setItem(row, col, QtGui.QTableWidgetItem(value.strip()))
                self.line_info_table.item(row, col).setBackgroundColor(self.editableCells_bg_color)
                save_change(row,col)
            else:
                self.line_info_table.item(row, col).setBackgroundColor(QtGui.QColor('red'))
                title = 'Invalid format for the ' + self.sp.field_tip[fieldItems[col]]
                s0 = self.sp.field_format[fieldItems[col]]
                s0 = s0[2:-1]
                msg = "'" + s + "' can not be converted into the proper field format: " + s0
                if col == self.sp.fields.index('vitesse'):
                    msg = msg + '\nor it is not a positive number.'
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
                get_info(self.curr_line_num)
                fill_line_info_table()
            self.line_info_table.blockSignals(False)
        def get_line_from_table(row):
            # Rebuild the fixed-width database record from the table row,
            # placing each field at its configured position (the 'proc' column
            # is derived data and is skipped).
            line = ' '*85
            jList = range(0,len(fieldItems))
            jList.remove(col_proc)
            for j in jList:
                s = self.line_info_table.item(row,j).text()
                width = self.sp.field_width[fieldItems[j]]
                align = self.sp.field_align[fieldItems[j]]
                pos = self.sp.field_pos[fieldItems[j]]
                s = '{:{a}{w}s}'.format(s, a=align, w=width)
                line = line[:pos] + s + line[pos:]
            line = line.rstrip()
            return line
        def save_change(row, col):
            # Persist the edited row: reference lines go to the model file,
            # everything else to the cosmetic file.  Unless only the comment
            # changed, optionally rerun the synthesis and refresh the nearby
            # lines dialog.
            line = get_line_from_table(row)
            if isRefLine(line):
                filename = self.sp.fic_model
            else:
                filename = self.sp.fic_cosmetik
            self.sp.replace_line(filename, line)
            if col != self.sp.fields.index('comment') and \
               self.sp.get_conf('qt_update_after_editing_lines', False):
                self.adjust()
                self.nearbyLines = self.sp.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=False)
                if self.nearbyLines is not None and self.nearbyLines_dialog.isVisible():
                    self.fill_nearbyLines_table()
        def init_lines():
            # Clear all per-line state before the first get_info() call.
            self.line = None
            self.subrefline = None
            self.refline = None
            self.subsatellites = []
            self.satellites = []
            self.n_sat = 0
            self.n_subsat = 0
            self.n_subref = 0
        # ---- dialog construction (uses the closures defined above) ----
        statusBar = QtGui.QStatusBar()
        s = 'Click on \"Satellites\" to cycle the tri-state display of satellite lines:\n' \
            ' 1 - The satellite lines in the spectral range of the synthesis are shown; \n' \
            ' 2 - All satellite lines (including subreference lines and lines outside the spectral range of the synthesis) are shown. \n' \
            ' 3 - No satellite line is shown; \n' \
            'Double-click on a line number to show the data for that line. \n' \
            'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
            'Select or click on a wavelength to draw a tick at that position and recenter the spectrum if necessary. \n' \
            'Click on \"Reset\" to return to the original line and plot settings. \n' \
            'The green fields are editable.'
        statusBar.addWidget(QtGui.QLabel(s),1)
        self.showStatusBar = False
        statusBar.setVisible(self.showStatusBar)
        self.show_satellites = 1
        get_window_size_and_position()
        # Reopening: dispose of the previous dialog and table first.
        if self.line_info_dialog is not None:
            self.line_info_dialog.close()
            self.line_info_table.close()
        self.line_info_dialog = QtGui.QDialog()
        self.line_info_dialog.resize(self.line_info_dialog_width,self.line_info_dialog_height)
        self.line_info_dialog.move(self.line_info_dialog_x,self.line_info_dialog_y)
        self.line_info_table = QtGui.QTableWidget()
        # Column indices, resolved once from the field list and captured by
        # the closures above.
        fieldItems = self.sp.fields
        fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
        col_num = fieldItems.index('num')
        col_ion = fieldItems.index('id')
        col_wave = fieldItems.index('lambda')
        col_proc = fieldItems.index('proc')
        col_lshift = fieldItems.index('l_shift')
        col_irel = fieldItems.index('i_rel')
        col_icor = fieldItems.index('i_cor')
        col_ref = fieldItems.index('ref')
        col_prof = fieldItems.index('profile')
        col_vel = fieldItems.index('vitesse')
        col_comm = fieldItems.index('comment')
        self.line_info_table.setColumnCount(len(fieldItems))
        self.line_info_table.setHorizontalHeaderLabels(fieldNames)
        if self.enable_tooltips_action.isChecked():
            for j in range(0,len(fieldItems)):
                self.line_info_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
        self.line_info_table.horizontalHeaderItem(col_vel).setText(u'\u0394v (factor)')
        if self.enable_tooltips_action.isChecked():
            s = 'For a reference line, it is the thermal broadening parameter, in km/s. \n' \
                'For satellite line, it is the dimensionless correction factor for the thermal broadening parameter with respect to the reference line.'
            self.line_info_table.horizontalHeaderItem(col_vel).setToolTip(s)
        self.line_info_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
        self.line_info_table.horizontalHeaderItem(col_comm).setText(' comment')
        init_lines()
        do_cosmetics = self.sp.get_conf('do_cosmetik')
        save_initial_plot_pars()
        self.curr_line_num = self.line_info_box.text()
        get_info(self.curr_line_num)
        fill_line_info_table()
        # Apply is repurposed as the "Satellites" cycle button.
        self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
                                                QtGui.QDialogButtonBox.Close|
                                                QtGui.QDialogButtonBox.Reset|
                                                QtGui.QDialogButtonBox.Apply)
        self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Satellites")
        if self.enable_tooltips_action.isChecked():
            self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip("Click to toggle the satellite lines")
        self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(toggle_show_satellites)
        s = "Click to return to the initial states of the line info dialog and figures"
        if self.enable_tooltips_action.isChecked():
            self.buttonBox.button(QtGui.QDialogButtonBox.Reset).setToolTip(s)
        self.buttonBox.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
        self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
        self.buttonBox.rejected.connect(self.line_info_dialog.close)
        self.line_info_table.doubleClicked.connect(on_doubleClick)
        self.line_info_table.itemChanged.connect(on_itemChanged)
        self.selected_item = None
        self.line_info_table.itemSelectionChanged.connect(on_itemSelectionChanged)
        self.line_info_table.itemClicked.connect(on_itemClicked)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.line_info_table)
        vbox.addWidget(self.buttonBox)
        vbox.addWidget(statusBar)
        self.line_info_dialog.setLayout(vbox)
        self.line_info_dialog.setWindowTitle('line info dialog')
        self.line_info_dialog.setWindowModality(QtCore.Qt.NonModal)
        self.line_info_dialog.show()
    def fill_nearbyLines_table(self):
        """(Re)populate the nearby-lines table from self.nearbyLines.

        When the ion filter is active, only rows whose ion matches
        self.nearbyLines_selected_ions are shown; the comparison uses the
        ion as written or its 'true' ion depending on the 'diff_lines_by'
        configuration.  The 'proc' column is not stored in the line tuples
        and is derived from the line number instead.
        """
        if self.nearbyLines is None or self.nearbyLines_table is None:
            return
        k = self.sp.get_conf('diff_lines_by')
        fieldItems = self.sp.fields
        # NOTE: Python 2 idiom — range() returns a list here, so remove() works.
        jList = range(0,len(fieldItems))
        jProc = fieldItems.index('proc')
        jList.remove(jProc)
        if self.nearbyDialogFilterIsActive:
            #selected_ions = self.sp.get_conf('selected_ions')
            selected_ions = self.nearbyLines_selected_ions
            selected_true_ions = [self.sp.true_ion(ion) for ion in selected_ions]
            nearbyLines = []
            for line in self.nearbyLines:
                ion = str(line[fieldItems.index('id')]).strip()
                true_ion = self.sp.true_ion(ion)
                selectThisIon = (( ion in selected_ions or true_ion in selected_ions ) and k == 1) or (true_ion in selected_true_ions and k != 1)
                if selectThisIon:
                    nearbyLines.append(line)
        else:
            nearbyLines = self.nearbyLines
        self.nearbyLines_table.setRowCount(len(nearbyLines))
        for i in range(0,len(nearbyLines)):
            ion = self.sp.true_ion(nearbyLines[i][fieldItems.index('id')])
            for j in jList:
                # The line tuples have no 'proc' entry, so columns after it
                # map to tuple index j-1.  (This also rebinds k, which is safe
                # because the filter above has already used it.)
                if j > jProc:
                    k = j - 1
                else:
                    k = j
                fmt = self.sp.field_format[fieldItems[j]]
                s = fmt.format(nearbyLines[i][k])
                s = str(s).strip()
                if j == fieldItems.index('num'):
                    # The process code is encoded as a digit inside the line
                    # number (9th character from the end).
                    if self.sp.isPseudoIon(ion):
                        proc_str = ''
                    else:
                        proc_str = self.sp.process[s[-9]]
                if j == fieldItems.index('id'):
                    if self.show_true_ions:
                        s = self.sp.true_ion(s).replace('_',' ').strip()
                item = QtGui.QTableWidgetItem(s)
                item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
                item.setBackgroundColor(self.readOnlyCells_bg_color)
                self.nearbyLines_table.setItem(i,j,item)
            # Fill the derived 'proc' column set aside above.
            item = QtGui.QTableWidgetItem(proc_str)
            item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
            item.setBackgroundColor(self.readOnlyCells_bg_color)
            self.nearbyLines_table.setItem(i,jProc,item)
        self.nearbyLines_table.resizeColumnsToContents()
        self.nearbyLines_table.resizeRowsToContents()
        self.nearbyLines_table.clearSelection()
    def show_nearbyLines_dialog(self):
        """Build and show the non-modal 'nearby lines' dialog.

        Lists the lines between the two cursor wavelengths
        (self.sp.cursor_w1 / cursor_w2).  Rows can be used to open the line
        info dialog, plot single or multiple ions, draw wavelength ticks,
        sort the table, and activate an ion filter.  Behavior lives in the
        nested closures, which share the local names defined near the end
        (fieldItems, col_*, statusBar).
        """
        def get_window_size_and_position():
            # First time: default geometry docked at the bottom-right of the
            # screen; afterwards reuse the current dialog geometry.
            if self.nearbyLines_dialog is None:
                font = QtGui.QFont()
                width = QtGui.QFontMetrics(font).width('='*120)
                self.nearbyLines_dialog_width = width
                self.nearbyLines_dialog_height = 470
                sG = QtGui.QApplication.desktop().screenGeometry()
                self.nearbyLines_dialog_x = sG.width()-self.nearbyLines_dialog_width
                self.nearbyLines_dialog_y = sG.height()-self.nearbyLines_dialog_height
            else:
                self.nearbyLines_dialog_width = self.nearbyLines_dialog.width()
                self.nearbyLines_dialog_height = self.nearbyLines_dialog.height()
                self.nearbyLines_dialog_x = self.nearbyLines_dialog.pos().x()
                self.nearbyLines_dialog_y = self.nearbyLines_dialog.pos().y()
        def do_reset():
            # Reset button: drop the ion filter (toggle_filter() is entered
            # with the flag forced True so it deactivates) and restore the
            # plot parameters saved when the dialog was opened.
            self.curr_line_num = self.init_nearby_line_num
            #get_info(self.curr_line_num)
            #fill_line_info_table()
            self.nearbyDialogFilterIsActive = True
            #self.nearbyLines_selected_ions = []
            toggle_filter()
            redo_initial_plot()
        def toggle_filter():
            # Activate the ion filter from the current table selection, or
            # deactivate it if already active; the filter button doubles as
            # the state indicator (red = active).
            self.nearbyLines_selected_ions = []
            if not self.nearbyDialogFilterIsActive:
                get_selected_ions()
                if len(self.nearbyLines_selected_ions) > 0:
                    self.nearbyDialogFilterIsActive = True
                    self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
                    self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Deactivate ion filter')
                else:
                    QtGui.QMessageBox.critical(self, 'nearby lines dialog: ion filter', 'No ion selected.', QtGui.QMessageBox.Ok )
            else:
                self.nearbyDialogFilterIsActive = False
                self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
                self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
            self.fill_nearbyLines_table()
        def save_initial_plot_pars():
            # Snapshot current line/ion/limit boxes for redo_initial_plot().
            self.init_nearby_line_num = self.line_info_box.text()
            self.init_nearby_ion = self.ion_box.text()
            self.init_nearby_xmin = self.xlim_min_box.text()
            self.init_nearby_xmax = self.xlim_max_box.text()
            self.init_nearby_y1min = self.y1lim_min_box.text()
            self.init_nearby_y1max = self.y1lim_max_box.text()
            self.init_nearby_y3min = self.y3lim_min_box.text()
            self.init_nearby_y3max = self.y3lim_max_box.text()
            self.init_nearby_legend_fontsize = self.sp.legend_fontsize
            self.init_nearby_legend_loc = self.sp.legend_loc
        def redo_initial_plot():
            # Restore the snapshot taken by save_initial_plot_pars() and redraw.
            #self.line_info_box.setText(self.init_line_num)
            self.ion_box.setText(self.init_nearby_ion)
            self.xlim_min_box.setText(self.init_nearby_xmin)
            self.xlim_max_box.setText(self.init_nearby_xmax)
            self.y1lim_min_box.setText(self.init_nearby_y1min)
            self.y1lim_max_box.setText(self.init_nearby_y1max)
            self.y3lim_min_box.setText(self.init_nearby_y3min)
            self.y3lim_max_box.setText(self.init_nearby_y3max)
            self.sp.legend_fontsize = self.init_nearby_legend_fontsize
            self.sp.legend_loc = self.init_nearby_legend_loc
            self.set_plot_limits_and_draw()
        def toggle_statusbar():
            # Help button: toggle the in-dialog help text.
            self.showStatusBar = not self.showStatusBar
            statusBar.setVisible(self.showStatusBar)
        def on_doubleClick():
            # Line/ref number -> open the line info dialog; ion -> plot it.
            item = self.nearbyLines_table.currentItem()
            row = item.row()
            col = item.column()
            if col in [col_num, col_ref]:
                self.line_info_box.setText(item.text())
                self.show_line_info_dialog()
            elif col == col_ion:
                self.ion_box.setText(item.text())
                self.draw_ion()
        def on_itemClicked():
            # to avoid blinking with itemSelectionChanged
            item = self.nearbyLines_table.currentItem()
            if item == self.selected_item:
                on_itemSelectionChanged()
        def on_itemSelectionChanged():
            # Selecting a wavelength draws a tick there, recentring the
            # x window when the tick falls within 5% of (or beyond) an edge,
            # clamped to the synthesis range.
            item = self.nearbyLines_table.currentItem()
            self.selected_item = item
            row = item.row()
            col = item.column()
            if col == col_wave:
                wavelength = np.float(item.text())
                l_shift = np.float(self.nearbyLines_table.item(row,col_lshift).text())
                wavelength = wavelength + l_shift
                line_num = str(self.nearbyLines_table.item(row,col_num).text())
                ion = str(self.nearbyLines_table.item(row,col_ion).text())
                max_wave = np.float(self.sp_max_box.text())
                min_wave = np.float(self.sp_min_box.text())
                r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
                f = 0.05
                if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
                    if wavelength-r < min_wave:
                        self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
                    elif wavelength+r > max_wave:
                        self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
                    else:
                        self.x_plot_lims = (wavelength-r,wavelength+r)
                    if not self.axes_fixed:
                        self.update_lim_boxes()
                    self.restore_axes()
                self.plot_tick_at(wavelength, ion, line_num)
            else:
                # Leaving the wavelength column removes a leftover green tick.
                if self.green_tick_shown:
                    self.on_draw()
                    self.green_tick_shown = False
        def do_header_clicked(col):
            # NOTE(review): the connect() for this handler is commented out
            # below, so it is currently unused.
            if col == col_ion:
                self.toggle_show_true_ions()
                self.fill_nearbyLines_table()
        def do_header_doubleClicked(col):
            # Sort by the double-clicked column; a second double-click on the
            # same column toggles ascending/descending.
            sort = fieldItems[col]
            if sort == self.nearbyLines_sort_by:
                self.nearbyLines_sort_reverse = not self.nearbyLines_sort_reverse
            else:
                self.nearbyLines_sort_reverse = False
                self.nearbyLines_sort_by = sort
            self.sort_nearbyLines(sort, self.nearbyLines_sort_reverse)
            self.fill_nearbyLines_table()
        def get_selected_ions():
            # Collect the distinct ions among the currently selected cells
            # into self.nearbyLines_selected_ions (empty if none).
            selectedItems = self.nearbyLines_table.selectedItems()
            selected_ions = []
            for item in selectedItems:
                col = item.column()
                if col == col_ion:
                    ion = str(item.text())
                    if not ion in selected_ions:
                        selected_ions.append(ion)
            if len(selected_ions) > 0:
                self.nearbyLines_selected_ions = selected_ions
            else:
                #self.nearbyLines_selected_ions = self.sp.get_conf('selected_ions')
                self.nearbyLines_selected_ions = []
        def do_selection():
            # "Plot selected ions" button: plot every selected ion, and open
            # the line info dialog on the first selected line number, if any.
            selectedItems = self.nearbyLines_table.selectedItems()
            selected_ions = []
            selected_lines = []
            for item in selectedItems:
                col = item.column()
                if col == col_ion:
                    ion = str(item.text())
                    if not ion in selected_ions:
                        selected_ions.append(ion)
                if col in [col_num, col_ref]:
                    line = item.text()
                    selected_lines.append(line)
            if len(selected_ions) > 0:
                s = ''
                for ion in selected_ions:
                    s = s + ion + ', '
                s = s[:-2]
                self.ion_box.setText(s)
                self.draw_ion()
            if len(selected_lines) > 0:
                s = selected_lines[0]
                self.line_info_box.setText(s)
                self.line_info()
        # ---- dialog construction (uses the closures defined above) ----
        get_window_size_and_position()
        self.nearbyLines_dialog = QtGui.QDialog()
        self.nearbyLines_dialog.resize(self.nearbyLines_dialog_width, self.nearbyLines_dialog_height)
        self.nearbyLines_dialog.move(self.nearbyLines_dialog_x,self.nearbyLines_dialog_y)
        statusBar = QtGui.QStatusBar()
        s = 'Double-click on a line number (or select the line number and press \"Apply\") to show line info dialog. \n' \
            'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
            'Click or select a wavelength to draw a tick at that position. \n' \
            'Select multiple ions (using click, Shift+click, and Ctrl+click) and press \"Plot selected ions\" plot line ticks and spectra for a list of ions. \n' \
            'Click on the ion header to select all ions. \n' \
            'Double-click on a column header to sort the table; Double-click again to toggle between ascending and descending order. \n' \
            'Click on \"Reset\" to return to the original selected ions and plot settings. \n' \
            'Click on \"Filter selected ions\" to activate/deactivate ion selection.'
        statusBar.addWidget(QtGui.QLabel(s),1)
        self.showStatusBar = False
        statusBar.setVisible(self.showStatusBar)
        self.nearbyLines_table = QtGui.QTableWidget()
        self.nearbyLines_table.setRowCount(len(self.nearbyLines))
        # Column indices, resolved once and captured by the closures above.
        fieldItems = self.sp.fields
        fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
        col_num = fieldItems.index('num')
        col_ion = fieldItems.index('id')
        col_wave = fieldItems.index('lambda')
        col_proc = fieldItems.index('proc')
        col_lshift = fieldItems.index('l_shift')
        col_irel = fieldItems.index('i_rel')
        col_icor = fieldItems.index('i_cor')
        col_ref = fieldItems.index('ref')
        col_prof = fieldItems.index('profile')
        col_vel = fieldItems.index('vitesse')
        col_comm = fieldItems.index('comment')
        self.nearbyLines_table.setColumnCount(len(fieldNames))
        self.nearbyLines_table.setHorizontalHeaderLabels(fieldNames)
        if self.enable_tooltips_action.isChecked():
            for j in range(0,len(fieldItems)):
                self.nearbyLines_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
        self.nearbyLines_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
        self.nearbyLines_table.horizontalHeaderItem(col_vel).setText(u'\u0394v')
        if self.enable_tooltips_action.isChecked():
            s = u'\u0394v is the thermal broadening parameter of the line, in km/s. \n' \
                'For a single Gaussian profile, it is the half-width of the line at the level of 1/e of the peak, \n' \
                'related to the full-width at half maximum and the Gaussian standard deviation by:\n\n' \
                u'      \u0394v = FWHM/(2(ln2)^\u00BD) = FWHM/1.665\n' \
                u'      \u0394v = \u221A2 \u03C3\n'
            self.nearbyLines_table.horizontalHeaderItem(col_vel).setToolTip(s)
        self.nearbyLines_table.horizontalHeaderItem(col_comm).setText(' comment')
        #self.nearbyDialogFilterIsActive = False
        self.fill_nearbyLines_table()
        save_initial_plot_pars()
        # RestoreDefaults is repurposed as the filter button, Apply as
        # "Plot selected ions".
        self.buttonBox_nearbyLines = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
                                                            QtGui.QDialogButtonBox.Reset|
                                                            QtGui.QDialogButtonBox.RestoreDefaults|
                                                            QtGui.QDialogButtonBox.Apply|
                                                            QtGui.QDialogButtonBox.Close)
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).setText('Plot selected ions')
        self.buttonBox_nearbyLines.rejected.connect(self.nearbyLines_dialog.close)
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_selection)
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
        self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_filter)
        self.nearbyLines_table.doubleClicked.connect(on_doubleClick)
        self.nearbyLines_table.itemSelectionChanged.connect(on_itemSelectionChanged)
        self.nearbyLines_table.itemClicked.connect(on_itemClicked)
        self.nearbyLines_table.verticalHeader().sectionDoubleClicked.connect(do_selection)
        #self.nearbyLines_table.horizontalHeader().sectionClicked.connect(do_header_clicked)
        self.nearbyLines_table.horizontalHeader().sectionDoubleClicked.connect(do_header_doubleClicked)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.nearbyLines_table)
        vbox.addWidget(self.buttonBox_nearbyLines)
        vbox.addWidget(statusBar)
        self.nearbyLines_dialog.setLayout(vbox)
        s = 'nearby line dialog: list of lines between {0:.2f} and {1:.2f} angstroms'.format(self.sp.cursor_w1, self.sp.cursor_w2)
        self.nearbyLines_dialog.setWindowTitle(s)
        self.nearbyLines_dialog.setWindowModality(QtCore.Qt.NonModal)
        # Remember the cursor window this table was built for (used by
        # save_change in the line info dialog to refresh it).
        self.cursor_w1 = self.sp.cursor_w1
        self.cursor_w2 = self.sp.cursor_w2
        if self.nearbyDialogFilterIsActive:
            self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
        else:
            self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
        self.nearbyLines_dialog.show()
def cont_dialog(self):
Pars = [ ( 'cont_unred' , 'Set to True if reddening is to be applied to the continuum' ),
( 'cont_edens' , u'Electron density, in cm\u207B\u00B3' ),
( 'cont_hi_t' , 'Temperature for the H I continuum, in K' ),
( 'cont_hi_i' , u'Intensity of the H I continuum (in theory, intensity of H\u03B2)' ),
( 'cont_hei_t' , 'Temperature for the He I continuum, in K' ),
( 'cont_hei_i' , 'Intensity of the He I continuum (in theory, intensity of He I 4471)' ),
( 'cont_heii_t' , 'Temperature for the He II continuum, in K' ),
( 'cont_heii_i' , 'Intensity of the He II continuum (in theory, intensity of He I 4686)' ),
( 'cont_bb_t' , 'Temperature of the blackbody continuum, in K' ),
( 'cont_bb_i' , 'Intensity of the blackbody continuum' ),
( 'cont_pl_alpha' , u'Index \u03B1 of the power-law continuum F = I*(\u03BB/5000 \u212B)**\u03B1' ),
( 'cont_pl_i' , 'Intensity I of the power-law continuum' ),
( 'cont_user_table' , 'Interpolation table for the user-defined continuum' ),
( 'cont_user_func' , 'Interpolation function for the user-defined continuum' ) ]
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def get_window_size_and_position():
if self.cont_pars_dialog is None:
self.cont_pars_dialog_width = 800
self.cont_pars_dialog_height = 460
sG = QtGui.QApplication.desktop().screenGeometry()
self.cont_pars_dialog_x = sG.width()-self.cont_pars_dialog_width
self.cont_pars_dialog_y = sG.height()-self.cont_pars_dialog_height
self.cont_pars_dialog_x = 0
self.cont_pars_dialog_y = 0
else:
self.cont_pars_dialog_width = self.cont_pars_dialog.width()
self.cont_pars_dialog_height = self.cont_pars_dialog.height()
self.cont_pars_dialog_x = self.cont_pars_dialog.pos().x()
self.cont_pars_dialog_y = self.cont_pars_dialog.pos().y()
def set_conf_from_table(row):
s = str(self.table.item(row,1).text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
def on_itemChanged():
self.table.blockSignals(True)
item = self.table.currentItem()
row = item.row()
s = str(item.text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
self.table.item(row, 1).setBackgroundColor(self.editableCells_bg_color)
self.cont_par_changed = True
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
self.table.item(row, 1).setBackgroundColor(QtGui.QColor('red'))
self.table.blockSignals(False)
get_window_size_and_position()
self.cont_pars_dialog = QtGui.QDialog()
self.cont_pars_dialog.resize(self.cont_pars_dialog_width, self.cont_pars_dialog_height)
self.cont_pars_dialog.move(self.cont_pars_dialog_x, self.cont_pars_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Click on \"Save\" to write the continuum parameters to a file. \n' \
'Click on \"Update\" to adjust the synthesis to the changes in the continuum parameters. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.table = QtGui.QTableWidget()
self.table.setRowCount(len(Pars))
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels([ 'parameter', 'value', 'help' ])
for j in range(0,len(Pars)):
item = QtGui.QTableWidgetItem(Pars[j][0])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,0,item)
value = self.sp.get_conf(Pars[j][0])
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
item = QtGui.QTableWidgetItem(str(value))
#item = QtGui.QTableWidgetItem(str(self.sp.get_conf(Pars[j][0])))
item.setBackgroundColor(self.editableCells_bg_color)
self.table.setItem(j,1,item)
item = QtGui.QTableWidgetItem(Pars[j][1])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,2,item)
self.table.resizeColumnsToContents()
self.table.resizeRowsToContents()
if self.table.columnWidth(1) > 300:
self.table.setColumnWidth(1,300)
self.table.itemChanged.connect(on_itemChanged)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Save|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).setDefault(True)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText('Update')
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip('Click to update synthesis with changes in the continuum parameters.')
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(self.adjust)
self.buttonBox.rejected.connect(self.cont_pars_dialog.close)
self.buttonBox.button(QtGui.QDialogButtonBox.Save).clicked.connect(self.save_cont_pars)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.cont_pars_dialog.setLayout(vbox)
self.cont_pars_dialog.setWindowTitle('Continuum parameters')
self.cont_pars_dialog.show()
def get_line_tick_lim(self, line_tick_pos):
if line_tick_pos == 1:
y1 = 0.43
y2 = 0.57
else:
if line_tick_pos == 2:
y1 = 0.05
y2 = 0.19
else:
y1 = 0.81
y2 = 0.95
return y1, y2
def getTickPosOfSelectedLine(self):
posTick = self.sp.get_conf('line_tick_pos_selectedLine',3)
if posTick not in [0,1,2]:
posOtherTicks = self.sp.get_conf('line_tick_pos')
if posTick == 4:
if posOtherTicks == 2:
posTick = 0
else:
posTick = 2
else:
posTick = posOtherTicks
return posTick
    def plot_line_ticks_for(self, satellites, ion, line_num, refline):
        """Plot green ticks for the satellite lines of a reference line.

        The target axis comes from the 'line_tick_ax' setting
        (0 -> main axis, 1 -> residual axis, 2 -> dedicated tick axis),
        falling back to the main axis when the residual panel is hidden.
        """
        k = self.sp.get_conf('line_tick_ax')
        # Axis 1 (residuals) is usable only while the residual panel is
        # shown; every other value is forced back to the main axis.
        # NOTE(review): this guard also forces k == 2 to 0, so the
        # k == 2 branch below appears unreachable — confirm intent.
        if not (k == 1 and self.residual_GroupBox.isChecked()):
            k = 0
        posTick = self.getTickPosOfSelectedLine()
        y1, y2 = self.get_line_tick_lim(posTick)
        if len(satellites) > 0:
            if ( k == 0 ):
                # Main axis: clip the ticks to the current x plot limits.
                self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], self.addGreenTickToLegend)
            elif ( k == 1 ):
                self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes3, y1, y2, self.addGreenTickToLegend)
            elif ( k == 2 ):
                # Dedicated tick axis uses a fixed vertical span.
                self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes2, 0.2, 0.8, self.addGreenTickToLegend)
            self.green_tick_shown = True
        self.canvas.draw()
    def on_draw(self, show_legend=True):
        """Redraw every panel: main spectrum, optional tick axis, residuals.

        On the first call the axes are created and initialized; the current
        axis limits are saved before and restored after the redraw so the
        user's view does not jump.
        """
        log_.debug('Entering on_drawn', calling=self.calling)
        if self.sp is None:
            log_.debug('Np sp in on_drawn', calling=self.calling)
            return
        if self.axes is None:
            # First draw: build the axes without re-entering on_draw.
            log_.debug('Calling make_axes from on_draw (self.axes is None)', calling=self.calling)
            self.call_on_draw=False
            self.make_axes()
            self.init_axes()
            log_.debug('back from make_axes from on_draw', calling=self.calling)
            self.call_on_draw=True
        if self.do_save:
            self.save_axes()
        self.axes.cla()
        self.sp.plot_ax1(self.axes, show_legend)
        # 'line_tick_ax': 0 -> ticks on main axis, 1 -> residual axis,
        # 2 -> dedicated small axis between the two.
        k = self.sp.get_conf('line_tick_ax')
        if self.show_line_ticks_action.isChecked() and ( k == 0 ):
            y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
            self.sp.plot_line_ticks(self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], show_legend=show_legend)
        if self.sp.get_conf('cont_plot', False):
            self.sp.plot_conts(self.axes)
        if self.residual_GroupBox.isChecked():
            self.axes3.cla()
            self.sp.plot_ax3(self.axes3, show_legend)
            if self.show_line_ticks_action.isChecked() and ( k == 1 ):
                y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
                self.sp.plot_line_ticks(self.axes3, y1, y2)
        if self.show_line_ticks_action.isChecked() and ( k == 2 ):
            self.axes2.cla()
            # self.sp.plot_ax2(self.axes2)
            self.sp.plot_line_ticks(self.axes2, 0.2, 0.8)
        # The wavelength label goes on the bottom-most visible axis only.
        if self.residual_GroupBox.isChecked():
            self.axes3.set_xlabel(r'Wavelength ($\AA$)')
            self.axes3.set_ylabel(r'Residual')
        #elif self.show_line_ticks_action.isChecked() and self.sp.get_conf(') and self.axes2 is not None:
        elif self.show_line_ticks_action.isChecked() and ( k == 2 ):
            self.axes2.set_xlabel(r'Wavelength ($\AA$)')
        else:
            self.axes.set_xlabel(r'Wavelength ($\AA$)')
        self.axes.set_ylabel(r'F$_\lambda$')
        self.restore_axes()
        # self.update_lim_boxes()
        if self.adjust_fig_action.isChecked():
            plt.tight_layout(0.1)
        self.canvas.draw()
        self.statusBar().showMessage('Redraw is finished.', 4000)
        log_.debug('Exit on_drawn', calling=self.calling)
        self.magenta_tick_shown = False
def show_lines_clicked(self):
if self.lineIDs_GroupBox.isChecked():
self.show_line_ticks_action.setChecked(True)
self.plot_lines_action.setChecked(True)
self.sp.set_conf('plot_lines_of_selected_ions', True)
self.set_ion()
else:
self.show_line_ticks_action.setChecked(False)
self.plot_lines_action.setChecked(False)
self.sp.set_conf('plot_lines_of_selected_ions', False)
self.make_axes()
def line_tick_color_clicked(self):
color = QtGui.QColorDialog.getColor()
self.sp.set_conf('line_tick_color', str(color.name()))
if self.show_line_ticks_action.isChecked():
self.make_axes()
    def toggle_show_true_ions(self):
        """Invert the flag that controls display of true ion names."""
        self.show_true_ions = not self.show_true_ions
def toggle_legend_clicked(self):
fontsize_list = ['small', 'medium', 'large']
i = fontsize_list.index(self.sp.legend_fontsize) + 1
if i == len(fontsize_list):
self.sp.legend_fontsize = fontsize_list[0]
self.sp.legend_loc = (self.sp.legend_loc)%2+1
else:
self.sp.legend_fontsize = fontsize_list[i]
self.make_axes()
def enable_tooltips_action_clicked(self):
if self.enable_tooltips_action.isChecked():
self.enableToolTips()
self.sp.set_conf('qt_enable_tooltips', True)
log_.debug('Tooltips enabled', calling=self.calling)
else:
self.disableToolTips()
self.sp.set_conf('qt_enable_tooltips', False)
log_.debug('Tooltips disabled', calling=self.calling)
def adjust_fig_action_clicked(self):
if self.adjust_fig_action.isChecked():
self.sp.set_conf('fig_adjust', True)
log_.debug('Adjust figure enabled', calling=self.calling)
else:
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
log_.debug('Adjust figure disabled', calling=self.calling)
self.draw_ion()
def show_uncor_obs_action_clicked(self):
if self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = True
else:
self.sp.show_uncor_spec = False
self.set_plot_limits_and_draw()
def disableToolTips(self):
self.lineIDs_GroupBox.setToolTip('')
self.residual_GroupBox.setToolTip('')
self.run_button.setToolTip('')
self.adjust_button.setToolTip('')
self.line_info_box.setToolTip('')
self.ebv_box.setToolTip('')
self.obj_velo_box.setToolTip('')
self.sp_min_box.setToolTip('')
self.sp_max_box.setToolTip('')
self.xlim_min_box.setToolTip('')
self.xlim_max_box.setToolTip('')
self.y1lim_min_box.setToolTip('')
self.y1lim_max_box.setToolTip('')
self.y3lim_min_box.setToolTip('')
self.y3lim_max_box.setToolTip('')
self.fix_axes_cb.setToolTip('')
self.cut_cb.setToolTip('')
self.ion_cb.setToolTip('')
self.sp_norm_box.setToolTip('')
self.resol_box.setToolTip('')
self.cut2_box.setToolTip('')
self.ion_box.setToolTip('')
self.line_sort_menu.setToolTip('')
self.line_field_menu.setToolTip('')
self.line_tick_ax_menu.setToolTip('')
self.line_tick_pos_menu.setToolTip('')
self.diff_lines_menu.setToolTip('')
self.verbosity_menu.setToolTip('')
self.style_menu.setToolTip('')
def enableToolTips(self):
self.lineIDs_GroupBox.setToolTip(self.lineIDs_GroupBox_ToolTip)
self.residual_GroupBox.setToolTip(self.residual_GroupBox_ToolTip)
self.run_button.setToolTip(self.run_button_ToolTip)
self.adjust_button.setToolTip(self.adjust_button_ToolTip)
self.line_info_box.setToolTip(self.line_info_box_ToolTip)
self.ebv_box.setToolTip(self.ebv_box_ToolTip)
self.obj_velo_box.setToolTip(self.obj_velo_box_ToolTip)
self.sp_min_box.setToolTip(self.sp_min_box_ToolTip)
self.sp_max_box.setToolTip(self.sp_max_box_ToolTip)
self.xlim_min_box.setToolTip(self.xlim_min_box_ToolTip)
self.xlim_max_box.setToolTip(self.xlim_max_box_ToolTip)
self.y1lim_min_box.setToolTip(self.y1lim_min_box_ToolTip)
self.y1lim_max_box.setToolTip(self.y1lim_max_box_ToolTip)
self.y3lim_min_box.setToolTip(self.y3lim_min_box_ToolTip)
self.y3lim_max_box.setToolTip(self.y3lim_max_box_ToolTip)
self.fix_axes_cb.setToolTip(self.fix_axes_cb_ToolTip)
self.cut_cb.setToolTip(self.cut_cb_ToolTip)
self.ion_cb.setToolTip(self.ion_cb_ToolTip)
self.sp_norm_box.setToolTip(self.sp_norm_box_ToolTip)
self.resol_box.setToolTip(self.resol_box_ToolTip)
self.cut2_box.setToolTip(self.cut2_box_ToolTip)
self.ion_box.setToolTip(self.ion_box_ToolTip)
self.line_sort_menu.setToolTip(self.line_sort_menu_ToolTip)
self.line_field_menu.setToolTip(self.line_field_menu_ToolTip)
self.line_tick_ax_menu.setToolTip(self.line_tick_ax_menu_ToolTip)
self.line_tick_pos_menu.setToolTip(self.line_tick_pos_menu_ToolTip)
self.diff_lines_menu.setToolTip(self.diff_lines_menu_ToolTip)
self.verbosity_menu.setToolTip(self.verbosity_menu_ToolTip)
self.style_menu.setToolTip(self.style_menu_ToolTip)
def show_line_ticks_action_clicked(self):
self.set_ion()
if self.plot_lines_action.isChecked():
self.sp.set_conf('plot_lines_of_selected_ions', True)
else:
self.sp.set_conf('plot_lines_of_selected_ions', False)
if self.show_line_ticks_action.isChecked() or self.plot_lines_action.isChecked():
self.lineIDs_GroupBox.setChecked(True)
else:
self.lineIDs_GroupBox.setChecked(False)
self.make_axes()
def plot_cont_action_clicked(self):
if self.plot_cont_action.isChecked():
self.sp.set_conf('cont_plot', True)
else:
self.sp.set_conf('cont_plot', False)
self.on_draw()
def ion_cb_changed(self):
if self.ion_cb.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.selected_ions_action.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.selected_ions_action.setChecked(False)
self.make_axes()
def cut_cb_changed(self):
if self.cut_cb.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.selected_intensities_action.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.selected_intensities_action.setChecked(False)
self.make_axes()
def selected_lines_clicked(self):
if self.selected_ions_action.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.ion_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.ion_cb.setChecked(False)
if self.selected_intensities_action.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.cut_cb.setChecked(False)
self.make_axes()
def diff_lines_by_process_clicked(self):
if self.diff_lines_by_process_action.isChecked():
self.sp.set_conf('diff_lines_by_process', True)
else:
self.sp.set_conf('diff_lines_by_process', False)
self.make_axes()
def editing_lines_clicked(self):
if self.editing_lines_action.isChecked():
self.sp.set_conf('qt_allow_editing_lines', True)
else:
self.sp.set_conf('qt_allow_editing_lines', False)
def update_lines_clicked(self):
if self.update_lines_action.isChecked():
self.sp.set_conf('qt_update_after_editing_lines', True)
else:
self.sp.set_conf('qt_update_after_editing_lines', False)
def cycle_forwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(-1, len(self.sp.selected_ions_data)-1):
j += 1
else:
j = -1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
def cycle_backwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(0, len(self.sp.selected_ions_data)):
j -= 1
else:
j = len(self.sp.selected_ions_data)-1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
    def show_line_ticks_from_file(self):
        """Overlay dark-magenta line ticks read from a user-chosen file.

        Each file line must start with a wavelength (remaining columns are
        ignored); only wavelengths inside the current x limits get a tick.
        A legend entry named after the file is added.
        """
        file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
        if self.tick_file is None:
            path = ''
        else:
            path = self.tick_file
        path = unicode(QtGui.QFileDialog.getOpenFileName(self, 'Open file', path, file_choices))
        if path:
            self.tick_file = path
        else:
            # Dialog canceled: keep the previous tick file and do nothing.
            return
        f = open(self.tick_file, 'r')
        lines = f.readlines()
        f.close()
        color = 'darkmagenta'
        posTick = self.sp.get_conf('line_tick_pos')
        y1, y2 = self.get_line_tick_lim(posTick)
        # Map the 'line_tick_ax' setting to a figure-axes index: both the
        # dedicated tick axis and the residual axis are fig.axes[1].
        k = self.sp.get_conf('line_tick_ax')
        if k == 2:
            k = 1
            y1 = 0.2
            y2 = 0.8
        elif k == 1 and self.residual_GroupBox.isChecked():
            k = 1
        else:
            k = 0
        # Shrink the span a little so file ticks stand out from normal ones.
        dy = (y2-y1)*0.30
        if self.magenta_tick_shown == True:
            # Redraw first to clear previously shown file ticks.
            self.draw_ion()
        for line in lines:
            line = line.strip()
            line = line.split(' ')[0]
            if self.isFloat(line):
                wavelength = np.float(line)
                if wavelength > self.x_plot_lims[0] and wavelength < self.x_plot_lims[1]:
                    self.fig.axes[k].axvline( wavelength, y1+dy, y2-dy, color = color, linestyle = 'solid', linewidth = 1.5 )
        # Off-plot step drawn only to create a legend entry for the file.
        self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', linewidth = 1.5, label = self.tick_file.split('/')[-1] )
        self.fig.axes[k].legend(loc=self.sp.legend_loc, fontsize=self.sp.legend_fontsize)
        self.fig.canvas.draw()
        self.magenta_tick_shown = True
def residual_box_clicked(self):
if self.residual_GroupBox.isChecked():
self.sp.set_conf('qt_plot_residuals', True)
else:
self.sp.set_conf('qt_plot_residuals', False)
self.make_axes()
    def make_axes(self):
        """(Re)build the figure's axes layout.

        The main axis always takes the top rows; a thin tick axis (axes2)
        is inserted when line ticks go to axis 2, and the residual axis
        (axes3) is appended when the residual panel is enabled.  All axes
        share the main plot's x axis.  Triggers on_draw when allowed.
        """
        log_.debug('Entering make_axes', calling=self.calling)
        if self.call_on_draw:
            # Preserve the current view before clearing the figure.
            self.save_axes()
        self.fig.clf()
        # Grid-row bookkeeping: i_* are starting rows, rspan_* row spans.
        i_ax1 = 0
        i_ax2 = 1
        i_ax3 = 2
        rspan_ax1 = 4
        rspan_ax2 = 1
        rspan_ax3 = 4
        n_subplots = rspan_ax1
        k = self.sp.get_conf('line_tick_ax')
        ShowAx2 = self.show_line_ticks_action.isChecked() and ( k == 2 )
        if ShowAx2:
            i_ax2 = n_subplots
            n_subplots += rspan_ax2
        if self.residual_GroupBox.isChecked():
            i_ax3 = n_subplots
            n_subplots += rspan_ax3
        if self.axes is not None:
            del(self.axes)
        self.axes = plt.subplot2grid((n_subplots,1), (i_ax1,0), rowspan=rspan_ax1)
        self.sp.ax1 = self.axes
        if ShowAx2:
            if self.axes2 is not None:
                del(self.axes2)
            self.axes2 = plt.subplot2grid((n_subplots,1), (i_ax2,0), rowspan=rspan_ax2, sharex=self.axes )
            # The tick axis carries no y information.
            self.axes2.tick_params( left='off',labelleft='off' )
            self.sp.ax2 = self.axes2
            self.axes.get_xaxis().set_visible(False)
        else:
            self.axes2 = None
            self.sp.ax2 = None
        if self.residual_GroupBox.isChecked():
            if self.axes3 is not None:
                del(self.axes3)
            self.axes3 = plt.subplot2grid((n_subplots,1), (i_ax3,0), rowspan=rspan_ax3, sharex=self.axes )
            self.sp.ax3 = self.axes3
            # Only the bottom-most axis keeps its x tick labels.
            if ShowAx2:
                self.axes2.get_xaxis().set_visible(False)
            self.axes.get_xaxis().set_visible(False)
        else:
            self.axes3 = None
            self.sp.ax3 = self.axes3
        self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
                                 bottom=self.sp.get_conf('fig_bottom'),
                                 right=self.sp.get_conf('fig_right'),
                                 top=self.sp.get_conf('fig_top'),
                                 left=self.sp.get_conf('fig_left'))
        if self.call_on_draw:
            log_.debug('Calling on_draw from make_axes', calling=self.calling)
            # Redraw without overwriting the limits saved above.
            self.do_save = False
            self.on_draw()
            self.do_save = True
        log_.debug('Exit make_axes', calling=self.calling)
    def init_axes(self):
        """Initialize plot limits from the configuration, computing
        data-driven defaults for any limit that is not configured.
        """
        self.x_plot_lims = self.sp.get_conf('x_plot_lims')
        if self.x_plot_lims is None:
            self.x_plot_lims = (np.min(self.sp.w), np.max(self.sp.w))
        self.y1_plot_lims = self.sp.get_conf('y1_plot_lims')
        if self.y1_plot_lims is None:
            mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
            # r > 1 pads the data range by (r-1)/2 on each side.
            r = 1.2
            if self.sp.sp_synth_lr is None:
                a = np.min(self.sp.f[mask])
                b = np.max(self.sp.f[mask])
            else:
                a = np.min(self.sp.sp_synth_lr[mask])
                b = np.max(self.sp.sp_synth_lr[mask])
            self.y1_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
        self.y2_plot_lims = self.sp.get_conf('y2_plot_lims')
        if self.y2_plot_lims is None:
            self.y2_plot_lims = (-0.5, 1.5)
        self.y3_plot_lims = self.sp.get_conf('y3_plot_lims')
        if self.y3_plot_lims is None:
            mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
            r = 1.2
            if self.sp.sp_synth_lr is None:
                self.y3_plot_lims = (-1,1)
            else:
                # Residual = observed minus synthetic, padded like y1.
                a = np.min((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
                b = np.max((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
                self.y3_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
        log_.debug('Axes initialized. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
        self.print_axes()
    def save_axes(self):
        """Record current axis limits and scales so a later redraw can
        restore the user's view (see restore_axes)."""
        if self.axes is not None:
            self.x_plot_lims = self.axes.get_xlim()
            self.y1_plot_lims = self.axes.get_ylim()
            self.xscale = self.axes.get_xscale()
            self.yscale = self.axes.get_yscale()
        if self.axes2 is not None:
            self.y2_plot_lims = self.axes2.get_ylim()
        if self.axes3 is not None:
            self.y3_plot_lims = self.axes3.get_ylim()
        self.sp.save_axes()
        log_.debug('Axes saved. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
        self.print_axes()
    def restore_axes(self):
        """Re-apply the limits and scales captured by save_axes.

        Each limit is applied only when both the stored value and the
        target axis exist.
        """
        if self.x_plot_lims is not None:
            if self.axes is not None:
                self.axes.set_xlim(self.x_plot_lims)
                log_.debug('X-axes restored to {}'.format(self.axes.get_xlim()), calling=self.calling)
            else:
                log_.debug('axes is None', calling=self.calling)
        else:
            log_.debug('x_plot_lims is None', calling=self.calling)
        if self.y1_plot_lims is not None:
            if self.axes is not None:
                self.axes.set_ylim(self.y1_plot_lims)
        if self.y2_plot_lims is not None:
            if self.axes2 is not None:
                self.axes2.set_ylim(self.y2_plot_lims)
        if self.y3_plot_lims is not None:
            if self.axes3 is not None:
                self.axes3.set_ylim(self.y3_plot_lims)
        # NOTE(review): the scale branches assume self.axes exists —
        # confirm restore_axes is never reached before make_axes.
        if self.xscale is not None:
            self.axes.set_xscale(self.xscale)
            log_.debug('X scale set to {}'.format(self.xscale))
        if self.yscale is not None:
            self.axes.set_yscale(self.yscale)
            log_.debug('Y scale set to {}'.format(self.yscale))
        log_.debug('Axes restored. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
        self.print_axes()
    def print_axes(self):
        """Log the current plot limits and axis object ids (debug aid)."""
        log_.debug('lims: {} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
        log_.debug('Axes IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
        # NOTE(review): this repeats the ids logged just above — possibly a
        # leftover duplicate; kept to preserve the exact log output.
        log_.debug('          IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
    def exec_init(self):
        """Run the synthesis described by the init file, prompting for one
        when none is set; exits the program if no file is chosen.
        """
        if self.init_file_name is None:
            self.get_init_filename()
        if self.init_file_name:
            self.statusBar().showMessage('Running synthesis ...')
            # Let Qt repaint the status bar before the long-running job.
            QtGui.QApplication.processEvents()
            self.start_spectrum()
            # Draw once without overwriting the saved axis limits.
            self.do_save = False
            self.on_draw()
            self.do_save = True
            self.restore_axes()
            self.update_lim_boxes()
            self.save_parameters_file = None
        else:
            log_.warn('A filename must be given', calling=self.calling)
            sys.exit('An initialization filename must be given')
def get_init_filename(self):
file_choices = "Python initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open pySSN initialization file'
init_file = str(QtGui.QFileDialog.getOpenFileName(self, title, self.init_file_name, file_choices))
if init_file and os.path.isfile(init_file):
self.init_file_name = init_file
else:
self.init_file_name = ''
def select_init(self):
old_name = self.init_file_name
self.get_init_filename()
if self.init_file_name:
self.exec_init()
else:
self.init_file_name = old_name
def save_pars(self):
path = self.sp.get_conf('save_parameters_filename')
keys = self.sp.default_keys
if '__builtins__' in keys:
keys.remove('__builtins__')
keys.sort()
with open(path, 'w') as f:
for key in keys:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def save_pars_as(self):
if self.save_parameters_file is None:
path = self.init_file_name
else:
path = self.save_parameters_file
keys = self.sp.default_keys
keys_to_be_removed = ['__builtins__', 'plot_magenta', 'label_magenta', 'plot_cyan', 'label_cyan']
for key in keys_to_be_removed:
if key in keys:
keys.remove(key)
keys.sort()
file_choices = "pySSN initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Save synthesis and plot parameters'
selectedFilter = 'pySSN initialization files (*init.py) (*init.py)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, title, path, file_choices, selectedFilter))
if path:
with open(path, 'w') as f:
for key in keys:
if key == 'instr_prof':
value = self.sp.format_instr_prof()
else:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.save_parameters_file = path
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def teste_instr_prof(self, prof):
if prof is None:
return 'not defined'
keys = prof.keys()
keys.remove('comment')
if not 'width' in keys:
return 'The parameter \'width\' was not found.'
if prof['width'] == 0.0:
return 'The value of \'width\' can not be zero'
if not (self.sp.get_key_indexes('Bb', prof)==self.sp.get_key_indexes('Br', prof)==
self.sp.get_key_indexes('beta', prof)==self.sp.get_key_indexes('alpha', prof)):
return 'Invalid indexes por the parameters \'Bb\', \'Br\', \'alpha\', or \'beta\''
if not all((type(prof[key])==float or type(prof[key])==int) for key in keys):
return 'The values of parameters must be numbers.'
return ''
    def apply_instr_prof(self):
        """Open a modeless dialog for editing the instrumental profile.

        The dialog shows the current 'instr_prof' dictionary as editable
        text; "Update" parses the text (via exec), validates it with
        teste_instr_prof and re-runs the profile convolution.  "Help"
        toggles an embedded HTML help browser.
        """
        def do_update():
            # Parse the edited text as Python code and extract 'instr_prof'.
            # NOTE: exec of user-entered text — acceptable for a local GUI
            # tool, but never feed it untrusted input.
            path = str(prof_box.toPlainText()).strip()
            try:
                user_module = {}
                exec(path) in user_module
                prof = user_module['instr_prof']
                self.sp.set_conf('instr_prof', prof)
                log_.message('new instrumental profile is ok', calling = self.calling)
            except:
                title = 'Error reading instrument profile'
                msg = 'Unable to read instrumental profile'
                path = None
                if self.showErrorBox:
                    QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
                else:
                    log_.warn(msg, calling = self.calling)
                return
            msg = self.teste_instr_prof(prof)
            if not msg:
                self.update_profile()
            else:
                title = 'Error in the instrument profile'
                if self.showErrorBox:
                    QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
                else:
                    log_.warn(msg, calling = self.calling)
        def toggle_statusbar():
            # "Help" toggles the embedded browser; grow the dialog with it.
            self.showHelpBrowser = not self.showHelpBrowser
            helpBrowser.setVisible(self.showHelpBrowser)
            if self.showHelpBrowser:
                self.instr_prof_dialog.resize(self.instr_prof_dialog_width, 2.1*self.instr_prof_dialog_height)
            else:
                self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
        def get_window_size_and_position():
            # First open: size to 80 Courier columns, docked at the right
            # edge of the screen; later opens reuse the user's geometry
            # (unless the help browser has inflated the dialog).
            if self.instr_prof_dialog is None:
                font = QtGui.QFont("Courier")
                width = QtGui.QFontMetrics(font).width('='*80)
                height = 15*QtGui.QFontMetrics(font).height()
                self.instr_prof_dialog_width = width
                self.instr_prof_dialog_height = height
                sG = QtGui.QApplication.desktop().screenGeometry()
                self.instr_prof_dialog_x = sG.width()-self.instr_prof_dialog_width
                self.instr_prof_dialog_y = sG.height()
            else:
                if not self.showHelpBrowser:
                    self.instr_prof_dialog_width = self.instr_prof_dialog.width()
                    self.instr_prof_dialog_height = self.instr_prof_dialog.height()
                self.instr_prof_dialog_x = self.instr_prof_dialog.pos().x()
                self.instr_prof_dialog_y = self.instr_prof_dialog.pos().y()
        self.showHelpBrowser = False
        get_window_size_and_position()
        self.instr_prof_dialog = QtGui.QDialog()
        self.instr_prof_dialog.setWindowFlags(self.instr_prof_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
        self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
        self.instr_prof_dialog.move(self.instr_prof_dialog_x,self.instr_prof_dialog_y)
        self.instr_prof_dialog.setWindowTitle('instrument profile dialog')
        prof_box = QtGui.QTextEdit()
        prof_box.setFontFamily("Courier")
        prof_box.setText('instr_prof = ' + self.sp.format_instr_prof())
        linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
        linkLabel.setOpenExternalLinks(True)
        helpBrowser = QtGui.QTextBrowser()
        # text=open('instr_prof.html').read()
        # This text should go to a file open with text=open('instr_prof.html').read()
        text = """<title> Instrumental profile help</title>
        <p>The instrumental profile if defined by the <a href="https://en.wikibooks.org/wiki/Python_Programming/Dictionaries">python dictionary</a> <b>instr_prof</b>.
        <p>The main component of the instrumental profile is set by the parameter <b>width</b>, which is the only indispensable parameters.</p>
        <p>If <b>width</b> &gt; 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>, P ∝ exp(-(λ/<b>width</b>)<sup>2</sup>).
        In this case, <b>width</b> is related to the normal full-width at half maximum by <b>width</b> = FWHM/(2(ln2)<sup>1/2</sup>) = FWHM/1.665.</p>
        <p>If <b>width</b> &lt; 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/rectangular_distribution">rectangular distribution</a>, P = 1 for -|<b>width</b>|/2 &lt; λ &lt; |<b>width</b>|/2, and P = 0 for all other values of λ.</p>
        <p>A variable number of optional components can be included, each defined by four parameters, <b>Bb</b>, <b>Br</b>, <b>alpha</b>, and <b>beta</b>, and following P ∝ <b>B</b>exp(-(λ/<b>beta</b>)<sup><b>alpha</b></sup>).
        <b>Bb</b> and <b>Br</b> are the intensity scale parameters for the bluish and reddish sides of the profile, respectively.</p>
        <p>If more than one optional component is in use, the parameters must be indexed as <b>alpha_1</b> <b>alpha_2</b>, etc.</p>
        Special cases for the optional components:
        <ul>
        <li><b>alpha</b> = 2 produces a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>.
        <li><b>alpha</b> = 2, <b>Bb</b> = 0 (or <b>Br</b> = 0) produces a <a href="https://en.wikipedia.org/wiki/Half_normal_distribution">half-Gaussian distribution</a>.
        <li><b>alpha</b> = 1 produces an <a href="https://en.wikipedia.org/wiki/Exponential_distribution">exponential distribution</a>.
        </ul>
        <p>A comment may be included in <b>instr_prof</b>.</p>
        <p>Examples:</p>
        <ol>
        <li>instr_prof = {'width': 0.5}<br>
        <li>instr_prof = {'width': 0.5, 'comment': 'Gaussian profle'}<br>
        <li>Example: instr_prof = {'width': 0.5, 'Bb':0.00016, 'Br':9e-05, 'beta': 2.2, 'alpha': 0.45}<br>
        <li>instr_prof = {'width': 0.5, 'Bb_1':0.00016, 'Br_1':9e-05, 'beta_1': 2.2, 'alpha_1': 0.45, 'Bb_2': 0.0014, 'Br_2':0.001, 'beta_2': 1.4, 'alpha_2': 0.75}<br>
        </ol>"""
        helpBrowser.document().setHtml(text)
        helpBrowser.setOpenExternalLinks(True)
        helpBrowser.setVisible(self.showHelpBrowser)
        policy = helpBrowser.sizePolicy()
        policy.setVerticalStretch(20)
        helpBrowser.setSizePolicy(policy)
        vbox = QtGui.QVBoxLayout()
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
                                           QtGui.QDialogButtonBox.Close|
                                           QtGui.QDialogButtonBox.Apply)
        buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
        vbox.addWidget(prof_box,0)
        vbox.addWidget(buttonBox)
        vbox.addWidget(linkLabel)
        vbox.addWidget(helpBrowser)
        buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
        buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
        buttonBox.rejected.connect(self.instr_prof_dialog.close)
        self.instr_prof_dialog.setLayout(vbox)
        self.instr_prof_dialog.setWindowModality(QtCore.Qt.NonModal)
        self.instr_prof_dialog.show()
def refine_wavelengths(self):
def table2list(text):
text = str(text)
text = text.splitlines()
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespaces.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
return 'lambda_shift_table = None'
else:
return 'lambda_shift_table = [{}]'.format(s)
def toggle_table():
self.refine_wave_as_table = not self.refine_wave_as_table
if self.refine_wave_as_table:
text = str(edit_box.toPlainText()).strip()
edit_box.clear()
text = text.replace('lambda_shift_table','')
text = text.strip(' =[]')
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
edit_box.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
text = table2list(edit_box.toPlainText())
if text == '':
self.refine_wave_as_table = True
return
edit_box.clear()
edit_box.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def do_update():
old_value = self.sp.get_conf('lambda_shift_table')
if self.refine_wave_as_table:
path = table2list(edit_box.toPlainText())
if path == 'error':
return
else:
path = str(edit_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
value = user_module['lambda_shift_table']
self.sp.set_conf('lambda_shift_table', value)
log_.message('new \'lambda_shit_table\' is ok', calling = self.calling)
except:
title = 'Error'
msg = 'Unable to read \'lambda_shit_table\''
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
self.sp.show_uncor_spec = True
self.sp.init_obs()
if self.sp.read_obs_error:
self.sp.set_conf('lambda_shift_table', old_value)
if self.showErrorBox:
title = 'Error'
msg = self.sp.read_obs_error
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
else:
self.rerun()
if not self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = False
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, 2.5*self.refine_wave_dialog_height)
else:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
def get_window_size_and_position():
if self.refine_wave_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.refine_wave_dialog_width = width
self.refine_wave_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.refine_wave_dialog_x = sG.width()-self.refine_wave_dialog_width
self.refine_wave_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.refine_wave_dialog_width = self.refine_wave_dialog.width()
self.refine_wave_dialog_height = self.refine_wave_dialog.height()
self.refine_wave_dialog_x = self.refine_wave_dialog.pos().x()
self.refine_wave_dialog_y = self.refine_wave_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.refine_wave_dialog = QtGui.QDialog()
self.refine_wave_dialog.setWindowFlags(self.refine_wave_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
self.refine_wave_dialog.move(self.refine_wave_dialog_x,self.refine_wave_dialog_y)
self.refine_wave_dialog.setWindowTitle('wavelength-refining dialog')
edit_box = QtGui.QTextEdit()
edit_box.setFontFamily("Courier")
self.refine_wave_as_table = False
edit_box.setText('lambda_shift_table = ' + str(self.sp.get_conf('lambda_shift_table')))
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('wave_refining.html').read()
# This text should go to a file open with text=open('wave-refining').read()
text = """<title> Wavelength-refining help</title>
<p>The wavelength calibration of the observational spectrum can be refined with the use of
the <a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>lambda_shift_table</b>.
Each element of this list is an ordered pair of numbers (λ, Δλ), where Δλ is the wavelength shift at the wavelength λ needed to improve the calibration, after the Doppler correction.</p>
<p>The data in <b>lambda_shit_table</b> will be linearly interpolated to provide the corrected wavelengths.
Outside the range of wavelenghts given in <b>lambda_shit_table</b>, the correction will be extrapolated to zero.</p>
<p>To set aside the wavelength-refining, set <b>lambda_shit_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>lambda_shift_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]</p></li>
<li><p>lambda_shift_table = None (to set aside the wavelength-refining)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>lambda_shit_table</b> as a two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>lambda_shit_table</b> list from the two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to refine the wavelength calibration and redo the synthesis.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
vbox.addWidget(edit_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.refine_wave_dialog.close)
self.refine_wave_dialog.setLayout(vbox)
self.refine_wave_dialog.setWindowModality(QtCore.Qt.NonModal)
self.refine_wave_dialog.show()
def plot_user_cont(self):
    """Draw a legend entry for the user continuum on the main axes and refresh.

    NOTE(review): `color`, `label` and `current_legend_loc` are not defined
    anywhere in this method and are not parameters -- unless they are module
    globals, calling this raises NameError. Confirm where these names are
    supposed to come from before relying on this method.
    """
    # step() with a degenerate [0,0] x-range: the artist exists only so that
    # legend() below picks up its label/color, nothing visible is plotted
    self.fig.axes[0].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
    self.fig.axes[0].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
    self.fig.canvas.draw()
def user_cont_table2list(self, text):
    """Convert the two-column (wavelength, flux) table in *text* into the
    textual python definitions of 'cont_user_func' and 'cont_user_table'.

    Rows are sorted lexically first. A row that is neither blank nor two
    float-parsable fields triggers an error report (dialog or log) and an
    empty-string return. An empty table maps to 'cont_user_table = None'.
    """
    rows = sorted(str(text).splitlines())
    pairs = []
    for row in rows:
        fields = row.split()
        if len(fields) == 2 and sum([self.isFloat(x) for x in fields]) == 2:
            pairs.append('({}, {}), '.format(fields[0], fields[1]))
        elif len(fields) > 0:
            # malformed row: report it and signal failure with ''
            title = 'Error in table'
            msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespaces.'.format(row)
            if self.showErrorBox:
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
            else:
                log_.warn(msg, calling = self.calling)
            return ''
    table_str = ''.join(pairs).strip(' ,')
    if table_str == '':
        table_str = 'None'
    else:
        table_str = '[{}]'.format(table_str)
    return 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(self.sp.get_conf('cont_user_func'), table_str)
def update_user_cont(self):
    """Parse 'cont_user_func' and 'cont_user_table' from the edit box,
    validate them, store them in the configuration and redraw.

    On a parse failure or an unknown interpolation kind, shows an error
    dialog (or logs a warning) and leaves the configuration untouched.
    When nothing changed, only redraws the plot.
    """
    msg = ''
    old_value = self.sp.get_conf('cont_user_table')
    old_kind = self.sp.get_conf('cont_user_func')
    if self.interpol_cont_as_table:
        path = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
        # bug fix: user_cont_table2list() signals a malformed table by
        # returning '' (and has already reported the error) -- the old
        # check compared against 'error', which it never returns
        if path == '':
            return
    else:
        path = str(self.user_cont_editBox.toPlainText()).strip()
    try:
        user_module = {}
        # bug fix: "exec(path) in user_module" only populated user_module as
        # a Python 2 exec *statement*; the tuple form below is equivalent in
        # Python 2 and also correct in Python 3
        exec(path, user_module)
        kind = user_module['cont_user_func']
        log_.message('new \'cont_user_func\' is ok', calling = self.calling)
        value = user_module['cont_user_table']
        log_.message('new \'cont_user_table\' is ok', calling = self.calling)
    except:
        # any failure (syntax error, missing key, ...) falls through to the
        # single error report below
        msg = 'Unable to read \'cont_user_func\' or \'cont_user_table\''
        path = None
    kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
    if msg == '':
        if kind not in kinds:
            msg = 'Invalid function'
    if msg != '':
        title = 'Error'
        msg = 'Problem in user-defined continuum interpolation.\n{}'.format(msg)
        if self.showErrorBox:
            QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
        else:
            log_.warn(msg, calling = self.calling)
        return
    if old_value != value or old_kind != kind:
        self.cont_par_changed = True
        # an empty table means "no user continuum"
        if value is not None and len(value) == 0:
            value = None
        self.sp.set_conf('cont_user_table', value)
        self.sp.set_conf('cont_user_func', kind)
        self.sp.update_user_cont()
        self.set_plot_limits_and_draw()
        self.sp.plot_conts(self.axes, ['user'])
        self.canvas.draw()
    else:
        self.set_plot_limits_and_draw()
def user_cont_list2table(self, points):
    """Fill the continuum edit box with *points* rendered as a two-column
    (wavelength, flux) table, one control point per line."""
    box = self.user_cont_editBox
    box.clear()
    for entry in points:
        left = str(entry[0]).strip()
        right = str(entry[1]).strip()
        box.append('{:<7} {}'.format(left, right))
def user_continuum(self):
    """Open the (non-modal) 'user-defined continuum dialog'.

    Lets the user edit 'cont_user_func' / 'cont_user_table' either as python
    source, as a two-column table, or by clicking control points on the
    figure; the 'Update' button applies the result via update_user_cont().
    The current plot settings are saved and restored when the dialog closes.
    """
    def save_initial_plot_pars():
        # Remember the current plot settings so on_close() can restore them.
        self.init_cont_line_num = self.line_info_box.text()
        self.init_cont_ion = self.ion_box.text()
        self.init_cont_xmin = self.xlim_min_box.text()
        self.init_cont_xmax = self.xlim_max_box.text()
        self.init_cont_y1min = self.y1lim_min_box.text()
        self.init_cont_y1max = self.y1lim_max_box.text()
        self.init_cont_y3min = self.y3lim_min_box.text()
        self.init_cont_y3max = self.y3lim_max_box.text()
        self.init_cont_legend_fontsize = self.sp.legend_fontsize
        self.init_cont_legend_loc = self.sp.legend_loc
        self.init_cont_sel_ions_only = self.selected_ions_action.isChecked()
    def redo_initial_plot():
        # Restore the plot settings captured by save_initial_plot_pars().
        self.line_info_box.setText(self.init_cont_line_num)
        self.ion_box.setText(self.init_cont_ion)
        self.xlim_min_box.setText(self.init_cont_xmin)
        self.xlim_max_box.setText(self.init_cont_xmax)
        self.y1lim_min_box.setText(self.init_cont_y1min)
        self.y1lim_max_box.setText(self.init_cont_y1max)
        self.y3lim_min_box.setText(self.init_cont_y3min)
        self.y3lim_max_box.setText(self.init_cont_y3max)
        self.sp.legend_fontsize = self.init_cont_legend_fontsize
        self.sp.legend_loc = self.init_cont_legend_loc
        self.selected_ions_action.setChecked(self.init_cont_sel_ions_only)
        self.selected_lines_clicked()
        self.set_plot_limits_and_draw()
    def toggle_table():
        # Switch the edit box between python-list view and two-column table view.
        self.interpol_cont_as_table = not self.interpol_cont_as_table
        if self.interpol_cont_as_table:
            # list -> table: keep only the text between '[' and ']', then
            # split on ')' to recover the individual (wavelength, flux) pairs
            text = str(self.user_cont_editBox.toPlainText()).strip()
            text = text[text.find('[')+1:text.find(']')]
            text = text.replace('\n','')
            self.user_cont_editBox.clear()
            text = text.split(')')
            for i in range(len(text)-1):
                line = text[i].strip(' (,')
                line = line.split(',')
                line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
                self.user_cont_editBox.append(line)
            buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
        else:
            # table -> list: leave both point-picking modes, then rebuild the list
            self.get_user_cont_points = False
            buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
            self.del_user_cont_points = False
            buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
            self.on_draw()
            text = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
            if text == '':
                # malformed table (already reported): stay in table view
                self.interpol_cont_as_table = True
                return
            self.user_cont_editBox.clear()
            self.user_cont_editBox.setText(text)
            buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
    def toggle_help():
        # Show/hide the embedded help browser, enlarging the dialog while shown.
        self.showHelpBrowser = not self.showHelpBrowser
        helpBrowser.setVisible(self.showHelpBrowser)
        if self.showHelpBrowser:
            self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, 2.5*self.interpol_cont_dialog_height)
        else:
            self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
    def get_window_size_and_position():
        # First invocation: compute a default geometry (80 Courier '=' wide,
        # top-right of the screen). Later: remember the user's geometry,
        # unless the dialog is currently enlarged by the help browser.
        if self.interpol_cont_dialog is None:
            font = QtGui.QFont("Courier")
            width = QtGui.QFontMetrics(font).width('='*80)
            height = 15*QtGui.QFontMetrics(font).height()
            self.interpol_cont_dialog_width = width
            self.interpol_cont_dialog_height = height
            sG = QtGui.QApplication.desktop().screenGeometry()
            self.interpol_cont_dialog_x = sG.width()-self.interpol_cont_dialog_width
            self.interpol_cont_dialog_y = sG.height()
        else:
            if not self.showHelpBrowser:
                self.interpol_cont_dialog_width = self.interpol_cont_dialog.width()
                self.interpol_cont_dialog_height = self.interpol_cont_dialog.height()
                self.interpol_cont_dialog_x = self.interpol_cont_dialog.pos().x()
                self.interpol_cont_dialog_y = self.interpol_cont_dialog.pos().y()
    def get_points():
        # Toggle the add-control-points-by-mouse mode (exclusive with del mode);
        # the red background marks the active button.
        self.get_user_cont_points = not self.get_user_cont_points
        self.del_user_cont_points = False
        buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
        if self.get_user_cont_points:
            buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('background-color:red;')
            self.set_plot_limits_and_draw()
            self.sp.plot_conts(self.axes, ['user'])
            self.canvas.draw()
            if self.interpol_cont_as_table == False:
                toggle_table()
        else:
            buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
    def del_points():
        # Toggle the delete-control-points-by-mouse mode (exclusive with add mode).
        self.del_user_cont_points = not self.del_user_cont_points
        self.get_user_cont_points = False
        buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
        if self.del_user_cont_points:
            buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('background-color:red;')
            self.set_plot_limits_and_draw()
            self.sp.plot_conts(self.axes, ['user'])
            self.canvas.draw()
            if self.interpol_cont_as_table == False:
                toggle_table()
        else:
            buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
    def on_close():
        # Restore the plot state saved when the dialog was opened.
        redo_initial_plot()
        self.interpol_cont_dialog.close()
    def do_update():
        # Leave both point-picking modes, then apply the edited continuum.
        self.get_user_cont_points = False
        self.del_user_cont_points = False
        buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
        buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
        self.update_user_cont()
    self.showHelpBrowser = False
    get_window_size_and_position()
    save_initial_plot_pars()
    # start from a clean plot showing only selected ions, with no ion filter
    self.ion_box.setText('')
    self.selected_ions_action.setChecked(True)
    self.selected_lines_clicked()
    self.set_plot_limits_and_draw()
    self.interpol_cont_dialog = QtGui.QDialog()
    self.interpol_cont_dialog.setWindowFlags(self.interpol_cont_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
    #self.interpol_cont_dialog.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowStaysOnTopHint)
    self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
    self.interpol_cont_dialog.move(self.interpol_cont_dialog_x,self.interpol_cont_dialog_y)
    self.interpol_cont_dialog.setWindowTitle('user-defined continuum dialog')
    self.user_cont_editBox = QtGui.QTextEdit()
    self.user_cont_editBox.setFontFamily("Courier")
    self.interpol_cont_as_table = False
    self.get_user_cont_points = False
    self.del_user_cont_points = False
    text = 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(str(self.sp.get_conf('cont_user_func')), self.sp.get_conf('cont_user_table'))
    self.user_cont_editBox.setText(text)
    linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
    linkLabel.setOpenExternalLinks(True)
    helpBrowser = QtGui.QTextBrowser()
    # text=open('user_continuum.html').read()
    # This text should go to a file open with text=open('user_continuum').read()
    text = """<title> User-defined continuum help</title>
<p>A user-defined continuum can be added to the continuum calculated from other sources (electron recombination, free-free transition, two-photom, black-body and
power-law emission). It is obtained by the interpolation of the data contained in the
<a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>cont_user_table</b>. Each element of this list is an ordered pair of numbers
(λ, <i>f</i>), where <i>f</i> is the additional continuum flux at the wavelength λ.</p>
<p>The parameter <b>cont_user_func</b> defines the kind of the interpolation. Possible values are 'linear', 'quadratic', 'cubic', corresponding to linear
interpolation, second and third order spline interpolation, respectively. Outside the range of wavelenghts given in <b>cont_user_table</b>, the user continuum
component will be extrapolated to zero.</p>
<p>There are three modes of editing the interpolation control points: editing the list <b>cont_user_table</b> directly or as a two columns table, or clicking
with the mouse on the figure at the intended level of total continuum (see Button functions below). To set aside the user-defined continuum, set
<b>cont_user_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>cont_user_func = 'linear'<br>
cont_user_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]
</p></li>
<li><p>cont_user_table = None (to set aside the user-defined continuum)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>cont_user_table</b> as a two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>cont_user_table</b> list from the two columns table.</p></li>
<li><p>Click on <b><span style="color:red">Add points</span></b> to activate/deactivate the mode that allows to add new controls points by mouse-clicking on the
figure. Each time a new control point is included, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Del points</span></b> to activate/deactivate the mode that allows to click on the figure to delete the nearest
(in wavelength) control point. Each time a control point is deleted, the interpolation is automatically updated</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to incorporate the changes in the user-defined continuum.</p></li>
<li><p>Click on <b><span style="color:red">Close</span></b> to close the dialog and return to the preceding plot setting.</p></li>
</ul>
"""
    helpBrowser.document().setHtml(text)
    helpBrowser.setOpenExternalLinks(True)
    helpBrowser.setVisible(self.showHelpBrowser)
    policy = helpBrowser.sizePolicy()
    policy.setVerticalStretch(20)
    helpBrowser.setSizePolicy(policy)
    vbox = QtGui.QVBoxLayout()
    # Retry/Ignore are repurposed as "Add points"/"Del points" below
    buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
                                       QtGui.QDialogButtonBox.RestoreDefaults|
                                       QtGui.QDialogButtonBox.Retry|
                                       QtGui.QDialogButtonBox.Ignore|
                                       QtGui.QDialogButtonBox.Close|
                                       QtGui.QDialogButtonBox.Apply)
    buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
    buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
    buttonBox.button(QtGui.QDialogButtonBox.Retry).setText("Add points")
    buttonBox.button(QtGui.QDialogButtonBox.Ignore).setText("Del points")
    vbox.addWidget(self.user_cont_editBox,0)
    vbox.addWidget(buttonBox)
    vbox.addWidget(linkLabel)
    vbox.addWidget(helpBrowser)
    buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
    buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
    buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
    buttonBox.button(QtGui.QDialogButtonBox.Retry).clicked.connect(get_points)
    buttonBox.button(QtGui.QDialogButtonBox.Ignore).clicked.connect(del_points)
    buttonBox.rejected.connect(on_close)
    #self.interpol_cont_dialog.onCloseEvet(on_close)
    self.interpol_cont_dialog.setLayout(vbox)
    self.interpol_cont_dialog.setWindowModality(QtCore.Qt.NonModal)
    self.interpol_cont_dialog.show()
def isValidFilename(self, filename):
    """Return True if *filename* can be opened for reading, or created for
    writing; False for None or an unusable path.

    NOTE: as in the original code, the write probe creates (or truncates)
    the file as a side effect when it does not exist yet.
    """
    if filename is None:
        return False
    try:
        # leak fix: close the probe handle instead of abandoning it
        with open(filename, 'r'):
            pass
        return True
    except IOError:
        try:
            with open(filename, 'w'):
                pass
            return True
        except IOError:
            return False
def set_cosmetic_file(self):
    """Ask the user for a line cosmetic file (save-file dialog), validate it,
    and enable line cosmetics with the chosen file."""
    file_choices = "Line cosmetic files (*cosm*.dat) (*cosm*.dat);;Data files (*.dat) (*.dat);;All files (*) (*)"
    title = 'Set the line cosmetic file'
    # DontConfirmOverwrite: choosing an existing file reuses it without warning
    cosmetic_file = str(QtGui.QFileDialog.getSaveFileName(self, title, '', file_choices, options=QtGui.QFileDialog.DontConfirmOverwrite))
    msg = "Line cosmetic file '{}' not valid!".format(cosmetic_file)
    if cosmetic_file and not self.isValidFilename(cosmetic_file):
        QtGui.QMessageBox.critical(self, 'pySSN', msg, QtGui.QMessageBox.Ok )
        cosmetic_file = None
    if cosmetic_file:
        self.sp.set_conf('do_cosmetik', True)
        dir_ = os.path.dirname(cosmetic_file)
        # store a relative name when the file lives in the current directory
        if dir_ == os.getcwd():
            cosmetic_file = cosmetic_file.split('/')[-1]
        self.sp.set_conf('fic_cosmetik', cosmetic_file)
        self.sp.fic_cosmetik = cosmetic_file
        if self.sp is not None:
            self.set_status_text()
        if self.axes is not None:
            self.adjust()
def empty_cosmetic_file(self):
    """Truncate the cosmetic file after asking the user for confirmation."""
    if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
        return
    title = 'pySSN: cosmetic file'
    msg = 'All lines in the cosmetic file will be removed.\nConfirm?'
    ret = QtGui.QMessageBox.question(self, title, msg, QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel )
    if ret != QtGui.QMessageBox.Ok:
        return
    # opening in write mode and closing immediately truncates the file
    with open(self.sp.fic_cosmetik, 'w'):
        pass
def order_lines(self, lines):
    """Return *lines* sorted by their integer 'num' field; equal numbers are
    ordered by the line text itself (same tie-break as sorting
    (number, line) tuples). Returns None when *lines* is None."""
    if lines is None:
        return None
    def _sort_key(entry):
        return (int(self.sp.fieldStrFromLine(entry, 'num')), entry)
    return sorted(lines, key=_sort_key)
def remove_duplicate_lines(self, lines):
    """Return *lines* with duplicates removed, where two lines are
    duplicates when they share the same integer 'num' field; the first
    occurrence wins. Returns None when *lines* is None."""
    if lines is None:
        return None
    seen = set()  # perf fix: set membership is O(1); the list was O(n) per line
    output = []
    for line in lines:
        line_num = int(self.sp.fieldStrFromLine(line, 'num'))
        if line_num not in seen:
            seen.add(line_num)
            output.append(line)
    return output
def order_cosmetic_file(self):
    """Sort the cosmetic file by line number, drop duplicate entries, rewrite
    it in place, and report the result in the status bar."""
    if self.sp.fic_cosmetik is None or not os.path.isfile(self.sp.fic_cosmetik):
        return
    f = open(self.sp.fic_cosmetik, 'r')
    cosmetic_lines = f.readlines()
    f.close()
    cosmetic_lines = self.order_lines(cosmetic_lines)
    n0 = len(cosmetic_lines)
    cosmetic_lines = self.remove_duplicate_lines(cosmetic_lines)
    n1 = len(cosmetic_lines)
    f = open(self.sp.fic_cosmetik, 'w')
    f.writelines(cosmetic_lines)
    f.close()
    # only mention duplicate removal when some lines were actually dropped
    if n0 > n1:
        s = ' and the duplicate lines removed'
    else:
        s = ''
    msg = 'The cosmetic \'{0:}\' file was ordered{1:}.'.format(self.sp.fic_cosmetik, s)
    self.statusBar().showMessage(msg, 4000)
def clean_cosmetic_file(self):
    """Remove from the cosmetic file the lines whose cosmetics are identical
    to the atomic-database values (i.e. change nothing), after asking the
    user for confirmation."""
    def ShowCleanMessage(UnchangedLineList):
        # Confirmation dialog; returns True when the user clicks 'Delete'.
        nUL = len(UnchangedLineList)
        # nUL >= 1 is guaranteed by the caller, so s1/s2/s3 are always bound
        if nUL == 1:
            s1 = ''
            s2 = 'was'
            s3 = 'this line'
        elif nUL > 1:
            s1 = 's'
            s2 = 'were'
            s3 = 'these lines'
        msgBox = QtGui.QMessageBox()
        msgBox.setIcon(QtGui.QMessageBox.Question)
        msgBox.title = 'pySSN: cosmetic file'
        msg = '{0:} unchanged line{1:} in the cosmetic file {2:} found.'.format(nUL, s1, s2)
        msgBox.setText(msg)
        msgBox.setInformativeText('Do you want to delete {:}?\n'.format(s3))
        # list the affected line numbers in the expandable details section
        detailedText = 'Unchanged line{:}:\n\n'.format(s1)
        for i in UnchangedLineList:
            detailedText = detailedText + str(i) + '\n'
        msgBox.setDetailedText(detailedText)
        DelButton = msgBox.addButton(self.tr("Delete"), QtGui.QMessageBox.ActionRole)
        s = 'Delete from the cosmetic file all unchanged lines'
        if self.enable_tooltips_action.isChecked():
            DelButton.setToolTip(s)
        msgBox.addButton(QtGui.QMessageBox.Cancel)
        answer = msgBox.exec_()
        if msgBox.clickedButton() == DelButton:
            answer = True
        else:
            answer = False
        return answer
    if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
        return
    #if not self.sp.get_conf('clean_cosmetic_file'):
    #    return
    if not os.path.isfile(self.sp.fic_cosmetik):
        return
    f = open(self.sp.fic_cosmetik, 'r')
    cosmetic_lines = f.readlines()
    f.close()
    UnchangedLineList = []
    ChangedLines = []
    # partition the file into unchanged lines (candidates for deletion)
    # and changed lines (always kept)
    for i in range(len(cosmetic_lines)):
        line_c = cosmetic_lines[i].rstrip()
        line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
        if self.sp.cosmetic_line_unchanged(line_c):
            UnchangedLineList.append(line_num)
        else:
            ChangedLines.append(line_c + '\n')
    if len(UnchangedLineList) > 0:
        ret = ShowCleanMessage(UnchangedLineList)
        if ret == True:
            # rewrite the file with only the changed lines
            f = open(self.sp.fic_cosmetik, 'w')
            f.writelines(ChangedLines)
            f.close()
    else:
        msg = 'No unchanged line in the cosmetic file {:}'.format(self.sp.fic_cosmetik)
        self.statusBar().showMessage(msg, 4000)
def match_cosmetic_phyat_files(self):
    """Check every line of the cosmetic file against the atomic database
    (phyat file) and interactively correct, keep, or delete lines whose
    wavelength or intensity no longer match."""
    def ShowErrorMessage():
        # Ask, for one mismatched line, whether it should be auto-corrected;
        # returns the clicked standard button (Yes/YesToAll/No/NoToAll).
        msg = 'The wavelength or intensity in the cosmetic file does not match that in the atomic database.\n\n' \
              'Do you want to try to automatically correct the cosmetic file?'
        msgBox = QtGui.QMessageBox()
        msgBox.setText("Error in cosmetic file for line: " + str(line_num))
        msgBox.setInformativeText(msg)
        msgBox.addButton(QtGui.QMessageBox.Yes)
        msgBox.addButton(QtGui.QMessageBox.YesToAll)
        msgBox.addButton(QtGui.QMessageBox.No)
        msgBox.addButton(QtGui.QMessageBox.NoToAll)
        msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
        answer = msgBox.exec_()
        return answer
    def ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound):
        # Summarize the scan; returns a list of deletion actions among
        # 'DelNotFnd' and 'DelUncor' (empty on Cancel).
        msgBox = QtGui.QMessageBox()
        msgBox.setText('pySSN: error in cosmetic file')
        if nCor > 0:
            s0 = 'Rerun the synthesis to take into account the changes.\n\n'
        else:
            s0 = ''
        if nUnCor > 0:
            s1 = 'The cosmetic data for lines that still have problems will be ignored. ' \
                 'Do you want to delete them from the cosmetic file?'
        else:
            s1 = ''
        msg = 'Number of lines with problems: {0:}\n' \
              'Number of corrected lines: {1:}\n' \
              'Number of uncorrected lines: {2:}\n' \
              'Number of lines not found in the atomic database: {3:}\n\n' \
              '{4:}{5:}'.format(nErr, nCor, nUnCor, nNfd, s0, s1)
        msgBox.setInformativeText(msg)
        if nNfd > 0:
            detailedText = 'Lines not found:\n\n'
            for i in NotFound:
                detailedText = detailedText + i + '\n'
            detailedText = detailedText + '\n'
        else:
            detailedText = ''
        if nUnCor > 0:
            detailedText = detailedText + 'Lines not corrected:\n\n'
            for i in UnCorList:
                detailedText = detailedText + i + '\n'
        msgBox.setDetailedText(detailedText)
        DelAllButton = msgBox.addButton(self.tr("Delete all"), QtGui.QMessageBox.ActionRole)
        DelNotFndButton = msgBox.addButton(self.tr("delete not found"), QtGui.QMessageBox.ActionRole)
        DelUncorButton = msgBox.addButton(self.tr("delete uncorrected"), QtGui.QMessageBox.ActionRole)
        if self.enable_tooltips_action.isChecked():
            s = 'Delete from the cosmetic file all lines that still have problems'
            DelAllButton.setToolTip(s)
            s = 'Delete from the cosmetic file the lines not found in the atomic database'
            DelNotFndButton.setToolTip(s)
            s = 'Delete from the cosmetic file the uncorrected lines'
            DelUncorButton.setToolTip(s)
        msgBox.addButton(QtGui.QMessageBox.Cancel)
        msgBox.setMaximumHeight(16777215)
        msgBox.setMinimumHeight(800)
        # It does not expand! Why?
        msgBox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        msgBox.setSizeGripEnabled(True)
        if nUnCor == 0:
            DelUncorButton.setEnabled(False)
            DelAllButton.setEnabled(False)
        if nNfd == 0:
            DelNotFndButton.setEnabled(False)
            DelAllButton.setEnabled(False)
        answer = msgBox.exec_()
        if msgBox.clickedButton() == DelAllButton:
            answer = ['DelNotFnd', 'DelUncor']
        elif msgBox.clickedButton() == DelNotFndButton:
            answer = ['DelNotFnd']
        elif msgBox.clickedButton() == DelUncorButton:
            answer = ['DelUncor']
        else:
            answer = []
        return answer
    if self.sp.fic_cosmetik is None:
        return
    if os.path.isfile(self.sp.fic_cosmetik):
        cosmetik_arr, errorMsg = self.sp.read_cosmetik()
        if len(errorMsg) > 0:
            # unreadable cosmetic file: disable cosmetics altogether
            self.sp.do_cosmetik = False
            self.sp.set_conf('do_cosmetik', False)
            title = 'Error in cosmetic file: '
            msg = 'Unable to read cosmetic data from file \'{}\':{}\n\nLine cosmetics will be disabled!'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
            if self.showErrorBox:
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
            else:
                log_.warn('{}: {}'.format(title, msg), calling=self.calling)
            return
        ret = None
        f = open(self.sp.fic_cosmetik, 'r')
        cosmetic_lines = f.readlines()
        f.close()
        ErrorList = []
        CorrectedList = []
        UnCorList = []
        NotFound = []
        k = self.sp.field_pos['id']
        keys = [ 'lambda', 'l_shift', 'i_rel', 'i_cor' ]
        for i in range(len(cosmetic_lines)):
            line_c = cosmetic_lines[i].rstrip()
            line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
            cosmeticLineOk = self.sp.cosmetic_line_ok(line_c)
            if cosmeticLineOk == None:
                # line number absent from the atomic database
                NotFound.append(line_c[:k])
                ErrorList.append(line_c[:k])
            elif cosmeticLineOk == False:
                ErrorList.append(line_c[:k])
                # ask once per line unless a *ToAll answer was already given
                if ret != QtGui.QMessageBox.YesToAll and ret != QtGui.QMessageBox.NoToAll:
                    ret = ShowErrorMessage()
                if ret == QtGui.QMessageBox.Yes or ret == QtGui.QMessageBox.YesToAll:
                    CorrectedList.append(line_c[:k])
                    line = self.sp.read_line(self.sp.phyat_file, line_num)
                    line = line.rstrip()
                    # bug fix: np.float was a deprecated alias of the builtin
                    # float and was removed in NumPy 1.24 -- use float directly
                    v0 = {key: float(self.sp.fieldStrFromLine(line, key)) for key in keys}
                    v1 = {key: float(self.sp.fieldStrFromLine(line_c, key)) for key in keys}
                    # re-express the shift and correction relative to the
                    # current database wavelength and intensity
                    l_shift = v1['lambda'] + v1['l_shift'] - v0['lambda']
                    i_cor = v1['i_cor'] * v1['i_rel'] / v0['i_rel']
                    l_shift_str = self.rightFormat(str(l_shift), 'l_shift')
                    i_cor_str = self.rightFormat(str(i_cor), 'i_cor')
                    line = self.sp.replace_field(line, 'l_shift', l_shift_str)
                    line = self.sp.replace_field(line, 'i_cor', i_cor_str)
                    log_.warn('(corrected) ' + line + '\n', calling=self.calling)
                    self.sp.replace_line(self.sp.fic_cosmetik, line)
                else:
                    UnCorList.append(line_c[:k])
                    log_.warn('Not corrected.\n', calling=self.calling)
        nErr = len(ErrorList)
        nCor = len(CorrectedList)
        nUnCor = len(UnCorList)
        nNfd = len(NotFound)
        if nErr > 0:
            answer = ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound)
            if 'DelNotFnd' in answer:
                for i in NotFound:
                    self.sp.remove_line(self.sp.fic_cosmetik, int(i))
            if 'DelUncor' in answer:
                for i in UnCorList:
                    self.sp.remove_line(self.sp.fic_cosmetik, int(i))
def set_status_text(self):
    """Refresh the main-window status line with version, init file, and --
    when a synthesis is active -- atomic data, model, and cosmetic file
    names (base names only)."""
    if self.sp is None:
        return
    init_name = self.sp.config_file.split('/')[-1]
    if self.sp.phyat_file == 'NO_phyat.dat':
        status = 'pySSN, v {}. init file: {}, No synthesis'.format(
            __version__, init_name)
    elif self.sp.get_conf('do_cosmetik'):
        status = 'pySSN, v {}. init file: {}, at. data: {}, model: {}, cosmetic: {}'.format(
            __version__,
            init_name,
            self.sp.phyat_file.split('/')[-1],
            self.sp.get_conf('fic_modele').split('/')[-1],
            self.sp.get_conf('fic_cosmetik').split('/')[-1])
    else:
        status = 'pySSN, v {}. init file: {}, at. data: {}, model: {}, No cosmetic'.format(
            __version__,
            init_name,
            self.sp.phyat_file.split('/')[-1],
            self.sp.get_conf('fic_modele').split('/')[-1])
    self.status_text.setText(status)
def test_init_file(self):
    """Syntax-check the initialization file by exec'ing it statement by
    statement; collect and report every offending statement. Returns True
    when the whole file is accepted, False otherwise.

    SECURITY NOTE: the init file is exec'ed, so it runs with full privileges;
    only trusted init files should be loaded.
    """
    # NOTE(review): the unconditional assignment below makes the preceding
    # `if` a dead store -- possibly an `else:` was intended so that error
    # boxes stay disabled during startup. Confirm before changing.
    if self.sp == None:
        self.showErrorBox = False
    self.showErrorBox = True
    invalidCommands = []
    if os.path.isfile(self.init_file_name):
        f = open(self.init_file_name, 'r')
        lines = f.readlines()
        f.close()
    else:
        invalidCommands.append('\nFile not found')
        lines = []
    triple_quoted_string_found = False
    # Regroup the physical file lines into logical statements: a line is a
    # new statement when it is unindented (and we are not inside a triple-
    # quoted string); continuation lines are appended to the previous one.
    newlines = []
    rows = []  # 1-based file line number where each statement starts
    for i in range(len(lines)):
        line = lines[i].split('#')[0].rstrip()
        k = line.find('=')
        if not (line.strip().startswith('#') or len(line.strip()) == 0):
            if '"""' in line:
                # entering or leaving a triple-quoted string
                triple_quoted_string_found = not triple_quoted_string_found
                if triple_quoted_string_found:
                    newlines.append(line.split('#')[0].rstrip())
                    rows.append(i+1)
                else:
                    s = line.split('#')[0].rstrip()
                    if len(s.strip()) > 0:
                        newlines[-1] += '\n' + s
            else:
                if len(line) == len(line.lstrip()) and not triple_quoted_string_found:
                    newlines.append(line.split('#')[0].rstrip())
                    rows.append(i+1)
                else:
                    s = line.split('#')[0].rstrip()
                    if len(s.strip()) > 0:
                        newlines[-1] += '\n' + s
    # Execute each reassembled statement and classify any failure.
    for i in range(len(newlines)):
        line = newlines[i]
        line_list = line.split('\n')
        # truncate long statements to three lines in the error report
        if len(line_list) > 3:
            line_str = line_list[0] + '\n' + line_list[1] + '\n' + line_list[2] + '\n...'
        else:
            line_str = line
        try:
            exec(line)
        except IndentationError:
            invalidCommands.append('\nIndentation error, line {}:\n{}'.format(rows[i],line_str))
        except SyntaxError:
            # a still-open triple-quoted string shows up as a syntax error
            if '"""' in line and triple_quoted_string_found:
                invalidCommands.append('\nUnclosed triple-quotation mark, line {}:\n{}'.format(rows[i],line_str))
            else:
                invalidCommands.append('\nInvalid syntax, line {}:\n{}'.format(rows[i],line_str))
        except(AttributeError, NameError):
            invalidCommands.append('\nUndefined variable name or attribute, line {}:\n{}'.format(rows[i],line_str))
        except:
            invalidCommands.append('\nUndefined error, line {}:\n{}'.format(rows[i],line_str))
    if len(invalidCommands) > 0:
        title = 'Fatal error'
        msg = 'Error in the initialization file \'{0}\': '.format(self.init_file_name)
        for line in invalidCommands:
            msg = msg + '\n' + line
        if self.showErrorBox:
            # before the first synthesis exists, the only option is to abort
            if self.sp == None:
                buttom = QtGui.QMessageBox.Abort
            else:
                buttom = QtGui.QMessageBox.Cancel
            QtGui.QMessageBox.critical(self, title, msg, buttom)
        else:
            log_.warn('{}: {}'.format(title, msg), calling=self.calling)
        return False
    return True
def start_spectrum(self):
init_file = self.init_file_name.split('/')[-1]
dir_ = self.init_file_name.split(init_file)[0]
if dir_ == '':
dir_ = './'
self.directory = dir_
if not self.test_init_file():
if self.sp == None:
sys.exit()
else:
return
self.sp = spectrum(config_file=self.init_file_name)
if self.sp.errorMsg:
if self.showErrorBox:
msg = 'Synthesis not possible. \n\n{}'.format(self.sp.errorMsg)
msg = self.sp.errorMsg
ret = QtGui.QMessageBox.critical(self, 'Critical Error', msg, QtGui.QMessageBox.Abort, QtGui.QMessageBox.Ignore)
if ret == QtGui.QMessageBox.Abort:
sys.exit()
self.sp.errorMsg = ''
if len(self.sp.read_obs_error) > 0:
title = 'Error reading observations'
msg = self.sp.read_obs_error
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ( self.sp.get_conf('fic_cosmetik') is None or
self.sp.get_conf('fic_cosmetik') == '' ):
self.sp.set_conf('do_cosmetik', False)
if self.sp.get_conf('do_synth') and self.sp.get_conf('do_cosmetik'):
self.match_cosmetic_phyat_files()
if self.sp.get_conf('clean_cosmetic_file'):
self.clean_cosmetic_file()
if self.sp.get_conf('order_cosmetic_file'):
self.order_cosmetic_file()
self.set_status_text()
self.axes = None
self.sp.ax2_fontsize = 6
self.sp_norm_box.setText('{}'.format(self.sp.get_conf('sp_norm')))
self.obj_velo_box.setText('{}'.format(self.sp.get_conf('obj_velo')))
self.ebv_box.setText('{}'.format(self.sp.get_conf('e_bv', 0)))
self.resol_box.setText('{}'.format(self.sp.get_conf('resol')))
self.cut2_box.setText('{}'.format(self.sp.get_conf('cut_plot2')))
self.magenta_box.setText('{}'.format(self.sp.plot_magenta))
self.magenta_label_box.setText('{}'.format(self.sp.label_magenta))
self.cyan_box.setText('{}'.format(self.sp.plot_cyan))
self.cyan_label_box.setText('{}'.format(self.sp.label_cyan))
self.sp_min_box.setText('{}'.format(self.sp.get_conf('limit_sp')[0]))
self.sp_max_box.setText('{}'.format(self.sp.get_conf('limit_sp')[1]))
self.init_axes()
self.xlim_min_box.setText('{}'.format(self.x_plot_lims[0]))
self.xlim_max_box.setText('{}'.format(self.x_plot_lims[1]))
self.y1lim_min_box.setText('{}'.format(self.y1_plot_lims[0]))
self.y1lim_max_box.setText('{}'.format(self.y1_plot_lims[1]))
self.y3lim_min_box.setText('{}'.format(self.y3_plot_lims[0]))
self.y3lim_max_box.setText('{}'.format(self.y3_plot_lims[1]))
self.verbosity_ag.actions()[self.sp.get_conf('log_level', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.line_tick_pos_ag.actions()[self.sp.get_conf('line_tick_pos', 0)].setChecked(True)
self.residual_GroupBox.setChecked(self.sp.get_conf('qt_plot_residuals', True))
self.selected_ions_action.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.ion_cb.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.selected_intensities_action.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.cut_cb.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.diff_lines_ag.actions()[self.sp.get_conf('diff_lines_by', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.editing_lines_action.setChecked(self.sp.get_conf('qt_allow_editing_lines', False))
self.update_lines_action.setChecked(self.sp.get_conf('qt_update_after_editing_lines', False))
self.plot_cont_action.setChecked(self.sp.get_conf('cont_plot', False))
self.show_line_ticks_action.setChecked(self.sp.get_conf('show_line_ticks', False))
self.plot_lines_action.setChecked(self.sp.get_conf('plot_lines_of_selected_ions', False))
self.lineIDs_GroupBox.setChecked(self.sp.get_conf('show_line_ticks', False) or self.sp.get_conf('plot_lines_of_selected_ions', False))
try:
selected_ions = self.sp.get_conf('selected_ions')
s = ''
for ion in selected_ions:
s = s + ion + ', '
if not s == '':
s = s[:-2]
self.ion_box.setText(s)
self.set_ion()
except:
self.ion_box.setText('')
self.line_sort_ag.actions()[self.sp.get_conf('save_lines_sort', 0)].setChecked(True)
self.show_header_action.setChecked(self.sp.get_conf('save_lines_header', False))
self.get_line_fields_to_print()
self.readOnlyCells_bg_color = QtGui.QColor('white')
self.editableCells_bg_color = QtGui.QColor('lightgreen')
if 'linux' in sys.platform and 'Plastique' in self.style_list:
default_style = 'Plastique'
elif 'darwin' in sys.platform and 'Macintosh (aqua)' in self.style_list:
default_style = 'Macintosh (aqua)'
else:
default_style = self.style_list[0]
if self.sp.get_conf('qt_style') not in self.style_list:
if 'QT_STYLE' in os.environ:
if os.environ['QT_STYLE'] in self.style_list:
self.sp.set_conf('qt_style', os.environ['QT_STYLE'])
else:
log_.warn('Unknown Qt style {}, using {}'.format(os.environ['QT_STYLE'], default_style))
self.sp.set_conf('qt_style', default_style)
else:
self.sp.set_conf('qt_style', default_style)
index_style = self.style_list.index(self.sp.get_conf('qt_style'))
self.style_ag.actions()[index_style].setChecked(True)
QtGui.qApp.setStyle(self.sp.get_conf('qt_style'))
self.enable_tooltips_action.setChecked(self.sp.get_conf('qt_enable_tooltips', True))
self.enable_tooltips_action_clicked()
self.adjust_fig_action.setChecked(self.sp.get_conf('fig_adjust', True))
def sp_norm(self):
if self.sp is None:
return
if not self.validate_sp_norm():
return
old_sp_norm = self.sp.get_conf('sp_norm')
new_sp_norm = np.float(self.sp_norm_box.text())
if old_sp_norm == new_sp_norm:
return
log_.message('Changing sp_norm. Old: {}, New: {}'.format(old_sp_norm, new_sp_norm), calling=self.calling)
self.statusBar().showMessage('Changing intensity scale of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.renorm(new_sp_norm)
self.on_draw()
def obj_velo(self):
if self.sp is None:
return
if not self.validate_obj_velo():
return
old_obj_velo = self.sp.get_conf('obj_velo')
new_obj_velo = np.float(self.obj_velo_box.text())
if old_obj_velo == new_obj_velo:
return
self.sp.iterpolate_velocity = False
self.sp.set_conf('obj_velo', new_obj_velo)
log_.message('Changing obj_velo. Old: {}, New: {}'.format(old_obj_velo, new_obj_velo), calling=self.calling)
self.statusBar().showMessage('Executing doppler correction of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.init_obs(obj_velo=new_obj_velo)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = True, do_profiles=False)
self.on_draw()
def ebv(self):
if self.sp is None:
return
if not self.validate_ebv():
return
old_ebv = self.sp.get_conf('e_bv')
new_ebv = np.float(self.ebv_box.text())
if old_ebv == new_ebv and not self.cont_par_changed:
return
log_.message('Changing E B-V. Old: {}, New: {}'.format(old_ebv, new_ebv), calling=self.calling)
self.statusBar().showMessage('Changing color excess E(B-V) ...', 4000)
self.statusBar().showMessage('Executing reddening correction of the synthetic spectrum ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('e_bv', new_ebv)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = False, do_profiles=False)
self.on_draw()
self.cont_par_changed = False
def rerun(self):
if not self.validate_synthesis_parameters():
return
if ( self.x_plot_lims[0] < np.float(self.sp_min_box.text()) or
self.x_plot_lims[1] > np.float(self.sp_max_box.text()) ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.statusBar().showMessage('Rerunning synthesis ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('limit_sp', (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text())))
self.sp.set_conf('resol', np.int(self.resol_box.text()))
self.sp.set_conf('obj_velo', np.float(self.obj_velo_box.text()))
self.sp.set_conf('sp_norm', np.float(self.sp_norm_box.text()))
self.sp.set_conf('e_bv', np.float(self.ebv_box.text()))
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run()
self.set_plot_limits_and_draw()
    def adjust(self):
        """Re-apply normalization, velocity and reddening, then run the
        line-adjustment step.

        Returns the number of adjusted lines from sp.adjust(), 0 when an
        update error occurred, or None when no spectrum is loaded.
        """
        if self.sp is None:
            return
        self.sp.errorMsg = ''
        self.statusBar().showMessage('Running update ...')
        QtGui.QApplication.processEvents()
        # Each of these is a no-op when its edit box is unchanged.
        self.sp_norm()
        self.obj_velo()
        self.ebv()
        if self.sp.errorMsg:
            if self.showErrorBox:
                msg = self.sp.errorMsg
                QtGui.QMessageBox.warning(self, 'Update error', msg, QtGui.QMessageBox.Ok)
            return 0
        ndiff, errorMsg = self.sp.adjust()
        if ndiff == -1:
            # Cosmetic file unreadable: disable cosmetics and tell the user.
            self.sp.do_cosmetik = False
            self.sp.set_conf('do_cosmetik', False)
            # NOTE(review): bare attribute access below has no effect --
            # was this meant to reset self.sp.fic_cosmetik?  Confirm.
            self.sp.fic_cosmetik
            self.set_status_text()
            title = 'Error in cosmetic file'
            msg = 'Unable to read from file \'{}\'\nChanging to \'no cosmetic\':\n{}'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
            if self.showErrorBox:
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
            else:
                log_.warn('{}: {}'.format(title, msg), calling=self.calling)
        if ndiff > 0:
            self.on_draw()
        self.statusBar().showMessage('Update finished.', 4000)
        return ndiff
    def apply_post_proc(self):
        """Ask for a user Python file defining post_proc(fig) and apply it
        to the current figure."""
        path = str(self.post_proc_file or '')
        file_choices = "Python files (*.py) (*.py);;All files (*) (*)"
        title = 'Open post-process file'
        # NOTE: unicode() and execfile() below are Python-2-only builtins.
        path = unicode(QtGui.QFileDialog.getOpenFileName(self, title, path, file_choices))
        path = path.split('/')[-1]
        if not path:
            return
        try:
            user_module = {}
            execfile(path, user_module)
            self.post_proc = user_module['post_proc']
            self.post_proc_file = path
            log_.message('function post_proc read from {}'.format(self.post_proc_file))
        except:
            # Best-effort: any failure (missing file, syntax error, missing
            # 'post_proc' key) falls through to the error report below.
            self.post_proc = None
            title = 'Error reading post-process file'
            msg = 'Unable to read post-process file \'{}\''.format(path)
            if self.showErrorBox:
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
            else:
                log_.warn(msg, calling = self.calling)
            return
        try:
            self.post_proc(self.fig)
            self.canvas.draw()
        except:
            title = 'Error executing post-process'
            msg = 'Error in post-process file \'{}\''.format(self.post_proc_file)
            if self.showErrorBox:
                QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
            else:
                log_.warn(msg, calling = self.calling)
def update_profile(self):
if self.sp is None:
return
self.sp.run(do_synth = True, do_read_liste = False, do_profiles=True)
self.on_draw()
def cut2(self):
if self.sp is None:
return
if not self.validate_cut():
return
self.selected_intensities_action.setChecked(True)
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
self.draw_ion()
def get_ion_str(self,s):
s = s.strip()
s = s.replace(' ', '_')
if s.isdigit():
line = self.sp.get_line_from_reduce_code(s)
if line is None:
s = ''
else:
s = self.sp.fieldStrFromLine(line,'id').strip()
return s
    def set_ion(self):
        """Parse the ion box into a list of ion identifiers and store it
        in the 'selected_ions' config entry.

        Entries may be reduced numeric codes (resolved via get_ion_str),
        an ion name, an ion suffixed with '*' (expanded to all related
        ions), or an element name (expanded to all of its ions).
        """
        if self.sp is None:
            return
        sList = []
        s = self.ion_box.text()
        # QString API: indexOf returns -1 when no comma is left.
        k = s.indexOf(',')
        while k >= 0:
            s0 = self.get_ion_str(str(s[:k]))
            if s0 != '' and s0 != '*':
                sList.append(s0)
            s = s[k+1:]
            k = s.indexOf(',')
        s0 = self.get_ion_str(str(s))
        if s0 != '' and s0 != '*':
            sList.append(s0)
        # Rebuild the display string from the normalized entries.
        s = ''
        for s0 in sList:
            s = s + s0 + ', '
        s = s[:-2]
        # Re-expand each entry; iterate over a copy since each item is
        # removed and replacement ions are appended to sList.
        for item in sList[:]:
            sList.remove(item)
            if item[-1] == '*':
                # Trailing '*' requests expansion to all related ions.
                item = item[:-1]
                this_ion_only = False
            else:
                this_ion_only = True
            self.sp.set_ion_list()
            if item.ljust(9) in self.sp.liste_raies['id']:
                if self.sp.true_ion(item) == item or this_ion_only:
                    sList = sList + [item]
                    if not this_ion_only:
                        sList = sList + self.sp.get_all_ions_from_ion(item)
            elif item.ljust(9) in self.sp.sp_theo['raie_ref']['id']:
                if self.sp.true_ion(item) == item or this_ion_only:
                    sList = sList + [item]
                    if not this_ion_only:
                        sList = sList + self.sp.get_all_ions_from_ion(item)
            else:
                # Not a known line id: treat the entry as an element name.
                ion_list = self.sp.get_ions_from_element(item)
                sList = sList + ion_list
        self.sp.set_conf('selected_ions', sList)
        self.ion_box.setText(s)
def set_refline_to_info_box(self,j):
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
if j == -1:
j = 0
s = str(self.sp.selected_ions_data[j][2][0])
self.line_info_box.setText(s)
def draw_ion(self):
if self.cut_cb.isChecked():
if self.validate_cut():
self.sp.set_conf('cut_plot2', np.float(self.cut2_box.text()))
else:
return
self.set_ion()
self.sp.set_conf('index_of_current_ion', -1)
self.sp.set_selected_ions_data()
self.set_refline_to_info_box(-1)
self.on_draw()
    def line_info(self):
        """Look up the line whose (reduced) number is in the info box and
        show its details in a dialog or on the console."""
        if self.sp is None:
            return
        msg = ''
        s = str(self.line_info_box.text())
        if s == '':
            return
        # Keep at most field_width-1 trailing digits and drop one leading zero.
        w = self.sp.field_width['num'] - 1
        s = s[-w:]
        if s[0] == '0':
            s = s[1:]
        self.line_info_box.setText(s)
        try:
            new_ref = int(s)
        except ValueError:
            msg = 'Invalid input.\n It is not an integer'
        if msg == '':
            line = self.sp.get_line_from_reduce_code(s)
            if line is None:
                msg = 'No line unambiguously associated with this number.'
        if msg == '':
            # Display the full line number resolved from the reduced code.
            s = self.sp.fieldStrFromLine(line,'num').strip()
            self.line_info_box.setText(s)
            self.line_info_ref = int(s)
            if self.sp.get_conf('qt_show_dialogs', True):
                self.show_line_info_dialog()
            else:
                self.sp.line_info(new_ref, sort='i_rel')
        else:
            title = 'Error in line number'
            QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def magenta_line(self):
if self.sp is None:
return
ref_str = self.magenta_box.text()
ref_txt = self.magenta_label_box.text()
if ref_str == '':
self.sp.plot_magenta = None
self.sp.label_magenta = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_magenta = new_ref
self.sp.label_magenta = ref_txt
self.on_draw()
def cyan_line(self):
if self.sp is None:
return
ref_str = self.cyan_box.text()
ref_txt = self.cyan_label_box.text()
if ref_str == '':
self.sp.plot_cyan = None
self.sp.label_cyan = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_cyan = new_ref
self.sp.label_cyan = ref_txt
self.on_draw()
def diff_lines(self):
self.sp.set_conf('index_of_current_ion', -1)
self.set_plot_ax2()
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
s = str(self.sp.selected_ions_data[0][2][0])
self.line_info_box.setText(s)
    def set_plot_ax2(self):
        """Store the tick-axis / tick-position / grouping menu choices in
        the config and rebuild the axes if line ticks are shown."""
        self.sp.set_selected_ions_data()
        # Each config entry stores the index of the checked menu action.
        k = self.line_tick_ax_list.index(self.line_tick_ax_ag.checkedAction().text())
        self.sp.set_conf('line_tick_ax',k)
        k = self.line_tick_pos_list.index(self.line_tick_pos_ag.checkedAction().text())
        self.sp.set_conf('line_tick_pos',k)
        k = self.diff_lines_list.index(self.diff_lines_ag.checkedAction().text())
        self.sp.set_conf('diff_lines_by',k)
        if self.show_line_ticks_action.isChecked():
            self.make_axes()
def verbosity(self):
verbosity = self.verbosity_list.index(self.verbosity_ag.checkedAction().text())
if verbosity == log_.level:
return
log_.debug('Verbosity changed from {} to {}'.format(log_.level, verbosity), calling=self.calling)
log_.level = verbosity
self.sp.set_conf('log_level', verbosity)
def style(self):
new_style_str = str(self.style_ag.checkedAction().text())
old_style_str = self.sp.get_conf('qt_style')
if new_style_str == old_style_str:
return
self.sp.set_conf('qt_style', new_style_str)
QtGui.qApp.setStyle(new_style_str)
log_.debug('Widget style changed from {} to {}'.format(old_style_str, new_style_str), calling=self.calling)
def update_lim_boxes(self):
xformat = '{:.1f}'
yformat = '{1:.{0}f}'
min_diff = 2
if abs(self.x_plot_lims[1] - self.x_plot_lims[0]) < min_diff:
m = (self.x_plot_lims[0] + self.x_plot_lims[1])/2
x_lims = (m - min_diff/2,m + min_diff/2)
else:
x_lims = self.x_plot_lims
min_diff = 0.2
if abs(self.y1_plot_lims[1] - self.y1_plot_lims[0]) < min_diff:
m = (self.y1_plot_lims[0] + self.y1_plot_lims[1])/2
y1_lims = (m - min_diff/2,m + min_diff/2)
else:
y1_lims = self.y1_plot_lims
min_diff = 0.2
if abs(self.y3_plot_lims[1] - self.y3_plot_lims[0]) < min_diff:
m = (self.y3_plot_lims[0] + self.y3_plot_lims[1])/2
y3_lims = (m - min_diff/2,m + min_diff/2)
else:
y3_lims = self.y3_plot_lims
if self.x_plot_lims[0] != np.float(self.xlim_min_box.text()):
self.xlim_min_box.setText(xformat.format(x_lims[0]))
if self.x_plot_lims[1] != np.float(self.xlim_max_box.text()):
self.xlim_max_box.setText(xformat.format(x_lims[1]))
delta = abs(y1_lims[1]-y1_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y1_plot_lims[0] != np.float(self.y1lim_min_box.text()):
self.y1lim_min_box.setText(yformat.format(precision, y1_lims[0]))
if self.y1_plot_lims[1] != np.float(self.y1lim_max_box.text()):
self.y1lim_max_box.setText(yformat.format(precision, y1_lims[1]))
delta = abs(y3_lims[1]-y3_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y3_plot_lims[0] != np.float(self.y3lim_min_box.text()):
self.y3lim_min_box.setText(yformat.format(precision, y3_lims[0]))
if self.y3_plot_lims[1] != np.float(self.y3lim_max_box.text()):
self.y3lim_max_box.setText(yformat.format(precision, y3_lims[1]))
self.set_plot_limits_and_draw()
def validate_input(self, editBox, field, title, varType = 'float', showError = True):
value = editBox.text()
if value == None:
return False
if ( ( varType == 'float' and not self.isFloat(value) ) or \
( varType == 'integer' and not self.isInteger(value) ) or \
( varType == 'positive integer' and not self.isPositiveInteger(value) ) or \
( varType == 'positive odd integer' and not self.isPositiveOdd(value) ) ):
msg = '{} should be a {}'.format(field, varType)
msg.replace('a integer', 'an integer')
editBox.setFocus()
if showError:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
else:
return True
    # Thin wrappers around validate_input: one per input widget, each with
    # a descriptive field name, error title and expected value type.
    def validate_sp_min(self):
        return self.validate_input(self.sp_min_box, 'xmin for the synthesis', 'Input error', 'float')
    def validate_sp_max(self):
        return self.validate_input(self.sp_max_box, 'xmax for the synthesis', 'Input error', 'float')
    def validate_sp_norm(self):
        return self.validate_input(self.sp_norm_box, 'normalization factor', 'Input error', 'float')
    def validate_ebv(self):
        return self.validate_input(self.ebv_box, 'color excess E(B-V)', 'Input error', 'float')
    def validate_obj_velo(self):
        return self.validate_input(self.obj_velo_box, 'radial velocity', 'Input error', 'float')
    def validate_resol(self):
        return self.validate_input(self.resol_box, 'rebinning factor', 'Input error', 'positive odd integer')
    def validate_xlim_min(self, showError = True):
        return self.validate_input(self.xlim_min_box, 'xmin', 'Invalid plot limit', 'float', showError)
    def validate_xlim_max(self, showError = True):
        return self.validate_input(self.xlim_max_box, 'xmax', 'Invalid plot limit', 'float', showError)
    def validate_y1lim_min(self):
        return self.validate_input(self.y1lim_min_box, 'ymin', 'Invalid plot limit', 'float')
    def validate_y1lim_max(self):
        return self.validate_input(self.y1lim_max_box, 'ymax', 'Invalid plot limit', 'float')
    def validate_y3lim_min(self):
        return self.validate_input(self.y3lim_min_box, 'residual ymin', 'Invalid plot limit', 'float')
    def validate_y3lim_max(self):
        return self.validate_input(self.y3lim_max_box, 'residual ymax', 'Invalid plot limit', 'float')
    def validate_cut(self):
        return self.validate_input(self.cut2_box, 'cut', 'Input error', 'float')
def sp_lim_in_range(self):
xmin = np.float(self.sp_min_box.text())
xmax = np.float(self.sp_max_box.text())
if ( xmin < xmax - 9.999 ) and ( xmin > 0. ) and ( xmax < 200000000.):
return True
else:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, 'Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.',
QtGui.QMessageBox.Ok )
else:
log_.warn('Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.', calling=self.calling)
return False
def validate_synthesis_parameters(self):
return ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() and
self.validate_sp_norm() and
self.validate_obj_velo() and
self.validate_ebv() and
self.validate_resol() )
def validate_plot_parameters(self):
return ( self.validate_xlim_min() and
self.validate_xlim_max() and
self.validate_y1lim_min() and
self.validate_y1lim_max() and
self.validate_y3lim_min() and
self.validate_y3lim_max() )
def set_plot_limits_and_draw(self):
if not self.validate_plot_parameters():
return
self.x_plot_lims = (np.float(self.xlim_min_box.text()), np.float(self.xlim_max_box.text()))
self.y1_plot_lims = (np.float(self.y1lim_min_box.text()), np.float(self.y1lim_max_box.text()))
self.y3_plot_lims = (np.float(self.y3lim_min_box.text()), np.float(self.y3lim_max_box.text()))
self.sp.set_conf('x_plot_lims', self.x_plot_lims)
self.sp.set_conf('y1_plot_lims', self.y1_plot_lims)
self.sp.set_conf('y3_plot_lims', self.y3_plot_lims)
self.restore_axes()
self.draw_ion()
def set_limit_sp(self):
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
self.sp.set_conf('limit_sp', limit_sp)
def set_limit_sp_and_run(self):
if str(self.sp_min_box.text()).strip() == '':
self.sp_min_box.setText('{:.1f}'.format(self.sp.w_min))
if str(self.sp_max_box.text()).strip() == '':
self.sp_max_box.setText('{:.1f}'.format(self.sp.w_max))
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
old_limit_sp = self.sp.get_conf('limit_sp')
new_limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
if old_limit_sp == new_limit_sp:
if not self.axes_fixed:
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.set_plot_limits_and_draw()
return
if not self.validate_xlim_min(False):
self.xlim_min_box.setText(self.sp_min_box.text())
if not self.validate_xlim_max(False):
self.xlim_max_box.setText(self.sp_max_box.text())
if ( np.float(self.xlim_min_box.text()) >= new_limit_sp[1] or
np.float(self.xlim_max_box.text()) <= new_limit_sp[0] ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.sp.set_conf('limit_sp', new_limit_sp)
log_.message('Changing limit_sp. Old: {}, New: {}'.format(old_limit_sp, new_limit_sp), calling=self.calling)
self.statusBar().showMessage('Changing the synthesis wavelength limits ...')
QtGui.QApplication.processEvents()
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.set_plot_limits_and_draw()
def resol(self):
if self.sp is None:
return
if not self.validate_resol():
return
old_resol = self.sp.get_conf('resol')
new_resol = np.int(self.resol_box.text())
if old_resol == new_resol:
return
self.sp.set_conf('resol', new_resol)
log_.message('Changing resol. Old: {}, New: {}'.format(old_resol, new_resol), calling=self.calling)
self.statusBar().showMessage('Changing rebinning factor ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('resol', new_resol)
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.on_draw()
def leave_fig(self, event):
self.sp.firstClick = True
if ( self.x_plot_lims != self.axes.get_xlim() or
self.y1_plot_lims != self.axes.get_ylim() or
( self.axes3 is not None and self.y3_plot_lims != self.axes3.get_ylim() ) ):
limits_changed = True
else:
limits_changed = False
if not self.axes_fixed and limits_changed:
self.save_axes()
self.update_lim_boxes()
def fix_axes(self):
if self.fix_axes_cb.isChecked():
self.axes_fixed = True
else:
self.axes_fixed = False
    def get_line_fields_to_print(self):
        """Check the field-menu entries listed in 'save_lines_fields'."""
        field_list = self.sp.get_conf('save_lines_fields')
        # NOTE: indexing dict.keys() only works on Python 2; assumes the
        # menu actions and line_print_dic entries are in the same order.
        for i in range(0,len(self.line_field_menu.actions())):
            if self.line_print_dic.keys()[i] in field_list:
                self.line_field_menu.actions()[i].setChecked(True)
            else:
                self.line_field_menu.actions()[i].setChecked(False)
def set_show_header(self):
if self.show_header_action.isChecked():
self.sp.set_conf('save_lines_header', True)
else:
self.sp.set_conf('save_lines_header', False)
    def set_line_fields_to_print(self):
        """Collect the checked field-menu entries into 'save_lines_fields'."""
        s = []
        # NOTE: indexing dict.keys() only works on Python 2; the order must
        # match the menu actions.
        for i in range(0,len(self.line_field_menu.actions())):
            if self.line_field_menu.actions()[i].isChecked():
                s.append( self.line_print_dic.keys()[i])
        self.sp.set_conf('save_lines_fields', s)
def save_lines(self):
self.sp.save_lines()
path = self.sp.get_conf('save_lines_filename')
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
    def save_lines_as(self):
        """Ask for a file name (filter preselected from the current
        extension) and save the line list there."""
        file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
        filename = self.sp.get_conf('save_lines_filename')
        extension = os.path.splitext(filename)[1][1:].lower()
        # Preselect the dialog filter matching the current extension.
        if extension in ['txt','dat']:
            selectedFilter = 'Text files (*.txt *.dat) (*.txt *.dat)'
        elif extension in ['tex']:
            selectedFilter = 'Tex files (*.tex) (*.tex)'
        elif extension in ['csv']:
            selectedFilter = 'CSV files (*.csv) (*.csv)'
        else:
            selectedFilter = 'All Files (*) (*)'
        # NOTE: unicode() is a Python-2-only builtin.
        path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save lines to file', filename, file_choices, selectedFilter))
        if path:
            self.sp.set_conf('save_lines_filename', path)
            self.sp.save_lines()
            self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def line_sort(self):
k = self.line_sort_list.index(self.line_sort_ag.checkedAction().text())
self.sp.set_conf('save_lines_sort',k)
def main_loc(init_filename=None, post_proc_file=None):
    """Run the GUI event loop and return the resulting matplotlib figure."""
    app = QtGui.QApplication(sys.argv)
    window = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
    window.show()
    app.exec_()
    return window.fig
def main_loc_obj(init_filename=None, post_proc_file=None):
    """Run the GUI event loop and return the AppForm instance itself."""
    app = QtGui.QApplication(sys.argv)
    window = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
    window.show()
    app.exec_()
    return window
def main():
    """Command-line entry point: parse arguments, set the log level and
    run the Qt application."""
    parser = get_parser()
    args = parser.parse_args()
    log_.level = args.verbosity
    app = QtGui.QApplication(sys.argv)
    form = AppForm(init_filename=args.file, post_proc_file=args.post_proc)
    form.show()
    app.exec_()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
basnijholt/holoviews | holoviews/plotting/mpl/chart.py | 2 | 51054 | from __future__ import absolute_import, division, unicode_literals
from itertools import product
import param
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from matplotlib.collections import LineCollection
from matplotlib.dates import DateFormatter, date2num
from ...core.dimension import Dimension, dimension_name
from ...core.options import Store, abbreviated_exception
from ...core.util import (
OrderedDict, match_spec, unique_iterator, basestring, max_range,
isfinite, datetime_types, dt_to_int, dt64_to_dt, search_indices,
unique_array, isscalar
)
from ...element import Raster, HeatMap
from ...operation import interpolate_curve
from ...util.transform import dim
from ..plot import PlotSelector
from ..util import compute_sizes, get_sideplot_ranges, get_min_distance
from .element import ElementPlot, ColorbarPlot, LegendPlot
from .path import PathPlot
from .plot import AdjoinedPlot, mpl_rc_context
from .util import mpl_version
class ChartPlot(ElementPlot):
    """
    Baseclass to plot Chart elements.

    Concrete chart plots (e.g. CurvePlot, AreaPlot below) subclass this
    and supply the data extraction and artist initialization methods.
    """
class CurvePlot(ChartPlot):
    """
    CurvePlot can plot Curve and ViewMaps of Curve, which can be
    displayed as a single frame or animation. Axes, titles and legends
    are automatically generated from dim_info.
    If the dimension is set to cyclic in the dim_info it will rotate
    the curve so that minimum y values are at the minimum x value to
    make the plots easier to interpret.
    """
    autotick = param.Boolean(default=False, doc="""
        Whether to let matplotlib automatically compute tick marks
        or to allow the user to control tick marks.""")
    interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
                                                  'steps-pre', 'steps-post'],
                                         default='linear', doc="""
        Defines how the samples of the Curve are interpolated,
        default is 'linear', other options include 'steps-mid',
        'steps-pre' and 'steps-post'.""")
    relative_labels = param.Boolean(default=False, doc="""
        If plotted quantity is cyclic and center_cyclic is enabled,
        will compute tick labels relative to the center.""")
    show_grid = param.Boolean(default=False, doc="""
        Enable axis grid.""")
    show_legend = param.Boolean(default=True, doc="""
        Whether to show legend for the plot.""")
    style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker', 'ms']
    _nonvectorized_styles = style_opts
    _plot_methods = dict(single='plot')
    def get_data(self, element, ranges, style):
        """Extract (xs, ys) coordinates, style and axis kwargs for plotting."""
        # Resolve dim-transform style options against the element/ranges.
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        # Step interpolation is implemented by resampling the curve.
        if 'steps' in self.interpolation:
            element = interpolate_curve(element, interpolation=self.interpolation)
        xs = element.dimension_values(0)
        ys = element.dimension_values(1)
        dims = element.dimensions()
        # Datetime x-values get a date formatter attached to the dimension.
        if xs.dtype.kind == 'M' or (len(xs) and isinstance(xs[0], datetime_types)):
            dimtype = element.get_dimension_type(0)
            dt_format = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
            dims[0] = dims[0](value_format=DateFormatter(dt_format))
        coords = (ys, xs) if self.invert_axes else (xs, ys)
        return coords, style, {'dimensions': dims}
    def init_artists(self, ax, plot_args, plot_kwargs):
        """Draw the line artist, using plot_date for datetime x-values."""
        xs, ys = plot_args
        if xs.dtype.kind == 'M' or (len(xs) and isinstance(xs[0], datetime_types)):
            artist = ax.plot_date(xs, ys, '-', **plot_kwargs)[0]
        else:
            artist = ax.plot(xs, ys, **plot_kwargs)[0]
        return {'artist': artist}
    def update_handles(self, key, axis, element, ranges, style):
        """Update the existing line artist with the new element's data."""
        artist = self.handles['artist']
        (xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
        artist.set_xdata(xs)
        artist.set_ydata(ys)
        return axis_kwargs
class ErrorPlot(ColorbarPlot):
    """
    ErrorPlot plots the ErrorBar Element type and supporting
    both horizontal and vertical error bars via the 'horizontal'
    plot option.
    """
    style_opts = ['edgecolor', 'elinewidth', 'capsize', 'capthick',
                  'barsabove', 'lolims', 'uplims', 'xlolims',
                  'errorevery', 'xuplims', 'alpha', 'linestyle',
                  'linewidth', 'markeredgecolor', 'markeredgewidth',
                  'markerfacecolor', 'markersize', 'solid_capstyle',
                  'solid_joinstyle', 'dashes', 'color']
    _plot_methods = dict(single='errorbar')
    def init_artists(self, ax, plot_data, plot_kwargs):
        """Draw the error bars and unpack the matplotlib return value."""
        handles = ax.errorbar(*plot_data, **plot_kwargs)
        bottoms, tops = None, None
        # Matplotlib 2.0 changed the errorbar return structure; caps may
        # be empty (e.g. when no capsize is set).
        if mpl_version >= str('2.0'):
            _, caps, verts = handles
            if caps:
                bottoms, tops = caps
        else:
            _, (bottoms, tops), verts = handles
        return {'bottoms': bottoms, 'tops': tops, 'verts': verts[0], 'artist': verts[0]}
    def get_data(self, element, ranges, style):
        """Extract coordinates and errorbar-specific style options."""
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        # Vectorized colors must be passed to errorbar via 'ecolor'.
        color = style.get('color')
        if isinstance(color, np.ndarray):
            style['ecolor'] = color
        if 'edgecolor' in style:
            style['ecolor'] = style.pop('edgecolor')
        c = style.get('c')
        if isinstance(c, np.ndarray):
            with abbreviated_exception():
                raise ValueError('Mapping a continuous or categorical '
                                 'dimension to a color on a ErrorBarPlot '
                                 'is not supported by the {backend} backend. '
                                 'To map a dimension to a color supply '
                                 'an explicit list of rgba colors.'.format(
                                     backend=self.renderer.backend
                                 )
                                )
        # Draw only the error bars, not markers for the data points.
        style['fmt'] = 'none'
        dims = element.dimensions()
        xs, ys = (element.dimension_values(i) for i in range(2))
        yerr = element.array(dimensions=dims[2:4])
        if self.invert_axes:
            coords = (ys, xs)
            err_key = 'xerr'
        else:
            coords = (xs, ys)
            err_key = 'yerr'
        # Asymmetric errors (4 dims) are passed as a 2xN array.
        style[err_key] = yerr.T if len(dims) > 3 else yerr[:, 0]
        return coords, style, {}
    def update_handles(self, key, axis, element, ranges, style):
        """Update the bar segments and caps in place for the new element."""
        bottoms = self.handles['bottoms']
        tops = self.handles['tops']
        verts = self.handles['verts']
        _, style, axis_kwargs = self.get_data(element, ranges, style)
        xs, ys, neg_error = (element.dimension_values(i) for i in range(3))
        samples = len(xs)
        # Symmetric errors reuse the negative error for the positive side.
        pos_error = element.dimension_values(3) if len(element.dimensions()) > 3 else neg_error
        if self.invert_axes:
            bxs, bys = ys - neg_error, xs
            txs, tys = ys + pos_error, xs
            new_arrays = [np.array([[bxs[i], xs[i]], [txs[i], xs[i]]])
                          for i in range(samples)]
        else:
            bxs, bys = xs, ys - neg_error
            txs, tys = xs, ys + pos_error
            new_arrays = [np.array([[xs[i], bys[i]], [xs[i], tys[i]]])
                          for i in range(samples)]
        verts.set_paths(new_arrays)
        if bottoms:
            bottoms.set_xdata(bxs)
            bottoms.set_ydata(bys)
        if tops:
            tops.set_xdata(txs)
            tops.set_ydata(tys)
        if 'ecolor' in style:
            verts.set_edgecolors(style['ecolor'])
        if 'linewidth' in style:
            verts.set_linewidths(style['linewidth'])
        return axis_kwargs
class AreaPlot(ChartPlot):
    """
    Plots Area elements by filling between the supplied value
    dimensions using matplotlib's fill_between/fill_betweenx.
    """

    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")

    style_opts = ['color', 'facecolor', 'alpha', 'edgecolor', 'linewidth',
                  'hatch', 'linestyle', 'joinstyle',
                  'fill', 'capstyle', 'interpolate']

    _nonvectorized_styles = style_opts

    _plot_methods = dict(single='fill_between')

    def get_data(self, element, ranges, style):
        """Return the x-values followed by one value array per vdim."""
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        xvals = element.dimension_values(0)
        yvals = [element.dimension_values(d) for d in element.vdims]
        return tuple([xvals] + yvals), style, {}

    def init_artists(self, ax, plot_data, plot_kwargs):
        """Draw the filled region, filling horizontally on inverted axes."""
        if self.invert_axes:
            artist = ax.fill_betweenx(*plot_data, **plot_kwargs)
        else:
            artist = ax.fill_between(*plot_data, **plot_kwargs)
        return {'artist': artist}

    def get_extents(self, element, ranges, range_type='combined'):
        vdims = element.vdims[:2]
        vdim = vdims[0].name
        if len(vdims) > 1:
            # Merge the ranges of both value dimensions so the extents
            # cover the full filled area.
            ranges[vdim] = {rkey: max_range([ranges[vd.name][rkey] for vd in vdims])
                            for rkey in ranges[vdim]}
        else:
            # Pad the soft range so the zero baseline is always included.
            lower, upper = ranges[vdim]['soft']
            lower = min(lower, 0) if isfinite(lower) else 0
            upper = max(upper, 0) if isfinite(upper) else 0
            ranges[vdim]['soft'] = (lower, upper)
        return super(AreaPlot, self).get_extents(element, ranges, range_type)
class SideAreaPlot(AdjoinedPlot, AreaPlot):
    """
    AreaPlot variant used when an Area element is adjoined to a main
    plot; defaults to bare axes and an invisible background.
    """

    bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
        Make plot background invisible.""")

    border_size = param.Number(default=0, doc="""
        The size of the border expressed as a fraction of the main plot.""")

    xaxis = param.ObjectSelector(default='bare',
                                 objects=['top', 'bottom', 'bare', 'top-bare',
                                          'bottom-bare', None], doc="""
        Whether and where to display the xaxis, bare options allow suppressing
        all axis labels including ticks and xlabel. Valid options are 'top',
        'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")

    yaxis = param.ObjectSelector(default='bare',
                                 objects=['left', 'right', 'bare', 'left-bare',
                                          'right-bare', None], doc="""
        Whether and where to display the yaxis, bare options allow suppressing
        all axis labels including ticks and ylabel. Valid options are 'left',
        'right', 'bare' 'left-bare' and 'right-bare'.""")
class SpreadPlot(AreaPlot):
    """
    SpreadPlot plots the Spread Element type.
    """

    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")

    def __init__(self, element, **params):
        super(SpreadPlot, self).__init__(element, **params)

    def get_data(self, element, ranges, style):
        """Return the x-values with the lower and upper spread envelopes."""
        xvals = element.dimension_values(0)
        center = element.dimension_values(1)
        lower_err = element.dimension_values(2)
        # With only three dimensions the spread is symmetric about the
        # center, so the same error column is used for both directions.
        if len(element.dimensions()) > 3:
            upper_err = element.dimension_values(3)
        else:
            upper_err = element.dimension_values(2)
        return (xvals, center - lower_err, center + upper_err), style, {}

    def get_extents(self, element, ranges, range_type='combined'):
        # Bypass AreaPlot's zero-baseline padding; a spread does not
        # need to include zero.
        return ChartPlot.get_extents(self, element, ranges, range_type)
class HistogramPlot(ColorbarPlot):
    """
    HistogramPlot can plot DataHistograms and ViewMaps of
    DataHistograms, which can be displayed as a single frame or
    animation.
    """

    style_opts = ['alpha', 'color', 'align', 'visible', 'facecolor',
                  'edgecolor', 'log', 'capsize', 'error_kw', 'hatch',
                  'linewidth']

    _nonvectorized_styles = ['alpha', 'log', 'error_kw', 'hatch', 'visible', 'align']

    def __init__(self, histograms, **params):
        self.center = False
        self.cyclic = False

        super(HistogramPlot, self).__init__(histograms, **params)

        # Axis settings are swapped when the plot is transposed.
        if self.invert_axes:
            self.axis_settings = ['ylabel', 'xlabel', 'yticks']
        else:
            self.axis_settings = ['xlabel', 'ylabel', 'xticks']
        val_dim = self.hmap.last.get_dimension(1)
        self.cyclic_range = val_dim.range if val_dim.cyclic else None

    @mpl_rc_context
    def initialize_plot(self, ranges=None):
        """
        Draw the last histogram in the map, set up the bar/offset-line
        plotting functions and finalize the axis.
        """
        hist = self.hmap.last
        key = self.keys[-1]

        ranges = self.compute_ranges(self.hmap, key, ranges)
        el_ranges = match_spec(hist, ranges)

        # Get plot ranges and values
        dims = hist.dimensions()[:2]
        edges, hvals, widths, lims, isdatetime = self._process_hist(hist)
        if isdatetime and not dims[0].value_format:
            dt_format = Dimension.type_formatters[np.datetime64]
            dims[0] = dims[0](value_format=DateFormatter(dt_format))

        style = self.style[self.cyclic_index]
        if self.invert_axes:
            self.offset_linefn = self.handles['axis'].axvline
            self.plotfn = self.handles['axis'].barh
        else:
            self.offset_linefn = self.handles['axis'].axhline
            self.plotfn = self.handles['axis'].bar

        with abbreviated_exception():
            style = self._apply_transforms(hist, ranges, style)
            if 'vmin' in style:
                raise ValueError('Mapping a continuous dimension to a '
                                 'color on a HistogramPlot is not '
                                 'supported by the {backend} backend. '
                                 'To map a dimension to a color supply '
                                 'an explicit list of rgba colors.'.format(
                                     backend=self.renderer.backend
                                 )
                )

        # Plot bars and make any adjustments
        legend = hist.label if self.show_legend else ''
        bars = self.plotfn(edges, hvals, widths, zorder=self.zorder, label=legend, align='edge', **style)
        self.handles['artist'] = self._update_plot(self.keys[-1], hist, bars, lims, ranges) # Indexing top

        ticks = self._compute_ticks(hist, edges, widths, lims)
        ax_settings = self._process_axsettings(hist, lims, ticks)
        ax_settings['dimensions'] = dims

        return self._finalize_axis(self.keys[-1], ranges=el_ranges, element=hist, **ax_settings)

    def _process_hist(self, hist):
        """
        Get data from histogram, including bin_ranges and values.
        """
        self.cyclic = hist.get_dimension(0).cyclic
        x = hist.kdims[0]
        edges = hist.interface.coords(hist, x, edges=True)
        values = hist.dimension_values(1)
        hist_vals = np.array(values)
        xlim = hist.range(0)
        ylim = hist.range(1)
        isdatetime = False
        # Matplotlib bar plots cannot consume datetimes directly;
        # convert edges and limits to matplotlib date numbers.
        if edges.dtype.kind == 'M' or isinstance(edges[0], datetime_types):
            edges = np.array([dt64_to_dt(e) if isinstance(e, np.datetime64) else e for e in edges])
            edges = date2num(edges)
            xlim = tuple(dt_to_int(v, 'D') for v in xlim)
            isdatetime = True
        widths = np.diff(edges)
        return edges[:-1], hist_vals, widths, xlim+ylim, isdatetime

    def _compute_ticks(self, element, edges, widths, lims):
        """
        Compute the ticks either as cyclic values in degrees or as roughly
        evenly spaced bin centers.
        """
        if self.xticks is None or not isinstance(self.xticks, int):
            return None
        if self.cyclic:
            x0, x1, _, _ = lims
            xvals = np.linspace(x0, x1, self.xticks)
            labels = ["%.0f" % np.rad2deg(x) + '\N{DEGREE SIGN}' for x in xvals]
        elif self.xticks:
            dim = element.get_dimension(0)
            # Use the builtin int dtype; the np.int alias was deprecated
            # in NumPy 1.20 and removed in NumPy 1.24.
            inds = np.linspace(0, len(edges), self.xticks, dtype=int)
            edges = list(edges) + [edges[-1] + widths[-1]]
            xvals = [edges[i] for i in inds]
            labels = [dim.pprint_value(v) for v in xvals]
        return [xvals, labels]

    def get_extents(self, element, ranges, range_type='combined'):
        # Pad the soft y-range so the zero baseline of the bars is
        # always visible.
        ydim = element.get_dimension(1)
        s0, s1 = ranges[ydim.name]['soft']
        s0 = min(s0, 0) if isfinite(s0) else 0
        s1 = max(s1, 0) if isfinite(s1) else 0
        ranges[ydim.name]['soft'] = (s0, s1)
        return super(HistogramPlot, self).get_extents(element, ranges, range_type)

    def _process_axsettings(self, hist, lims, ticks):
        """
        Get axis settings options including ticks, x- and y-labels
        and limits.
        """
        axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
        return axis_settings

    def _update_plot(self, key, hist, bars, lims, ranges):
        """
        Process bars can be subclassed to manually adjust bars
        after being plotted.
        """
        return bars

    def _update_artists(self, key, hist, edges, hvals, widths, lims, ranges):
        """
        Update all the artists in the histogram. Subclassable to
        allow updating of further artists.
        """
        plot_vals = zip(self.handles['artist'], edges, hvals, widths)
        for bar, edge, height, width in plot_vals:
            # On inverted axes bar width/height swap roles.
            if self.invert_axes:
                bar.set_y(edge)
                bar.set_width(height)
                bar.set_height(width)
            else:
                bar.set_x(edge)
                bar.set_height(height)
                bar.set_width(width)

    def update_handles(self, key, axis, element, ranges, style):
        """
        Update the bar artists in place from the supplied element and
        return the recomputed axis settings.
        """
        # Process values, axes and style
        edges, hvals, widths, lims, _ = self._process_hist(element)

        ticks = self._compute_ticks(element, edges, widths, lims)
        ax_settings = self._process_axsettings(element, lims, ticks)
        self._update_artists(key, element, edges, hvals, widths, lims, ranges)
        return ax_settings
class SideHistogramPlot(AdjoinedPlot, HistogramPlot):
    """
    Histogram attached to the side of a main plot; offsets the bars so
    a colorbar can be drawn between the main plot and the histogram,
    and optionally colors the bars by the main plot's colormap.
    """

    bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
        Make plot background invisible.""")

    offset = param.Number(default=0.2, bounds=(0,1), doc="""
        Histogram value offset for a colorbar.""")

    show_grid = param.Boolean(default=False, doc="""
        Whether to overlay a grid on the axis.""")

    def _process_hist(self, hist):
        """
        Subclassed to offset histogram by defined amount.
        """
        edges, hvals, widths, lims, isdatetime = super(SideHistogramPlot, self)._process_hist(hist)
        offset = self.offset * lims[3]
        # Scale bar heights into the remaining (1 - offset) fraction and
        # shift them up so the colorbar fits in the offset region.
        hvals *= 1-self.offset
        hvals += offset
        lims = lims[0:3] + (lims[3] + offset,)
        return edges, hvals, widths, lims, isdatetime

    def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
        super(SideHistogramPlot, self)._update_artists(n, element, edges, hvals, widths, lims, ranges)
        self._update_plot(n, element, self.handles['artist'], lims, ranges)

    def _update_plot(self, key, element, bars, lims, ranges):
        """
        Process the bars and draw the offset line as necessary. If a
        color map is set in the style of the 'main' ViewableElement object, color
        the bars appropriately, respecting the required normalization
        settings.
        """
        main = self.adjoined.main
        _, y1 = element.range(1)
        offset = self.offset * y1
        range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)

        # Check if plot is colormapped
        plot_type = Store.registry['matplotlib'].get(type(range_item))
        if isinstance(plot_type, PlotSelector):
            plot_type = plot_type.get_plot_class(range_item)
        opts = self.lookup_options(range_item, 'plot')
        if plot_type and issubclass(plot_type, ColorbarPlot):
            cidx = opts.options.get('color_index', None)
            if cidx is None:
                # Fall back to a color style mapped to a dimension name.
                opts = self.lookup_options(range_item, 'style')
                cidx = opts.kwargs.get('color', None)
                if cidx not in range_item:
                    cidx = None
            cdim = None if cidx is None else range_item.get_dimension(cidx)
        else:
            cdim = None

        # Get colormapping options
        if isinstance(range_item, (HeatMap, Raster)) or cdim:
            style = self.lookup_options(range_item, 'style')[self.cyclic_index]
            cmap = cm.get_cmap(style.get('cmap'))
            main_range = style.get('clims', main_range)
        else:
            cmap = None

        # Draw the separator line once, then only update its position.
        if offset and ('offset_line' not in self.handles):
            self.handles['offset_line'] = self.offset_linefn(offset,
                                                             linewidth=1.0,
                                                             color='k')
        elif offset:
            self._update_separator(offset)

        if cmap is not None:
            self._colorize_bars(cmap, bars, element, main_range, dim)
        return bars

    def _colorize_bars(self, cmap, bars, element, main_range, dim):
        """
        Use the given cmap to color the bars, applying the correct
        color ranges as necessary.
        """
        cmap_range = main_range[1] - main_range[0]
        lower_bound = main_range[0]
        colors = np.array(element.dimension_values(dim))
        # Normalize values into the [0, 1] range expected by the cmap.
        colors = (colors - lower_bound) / (cmap_range)
        for c, bar in zip(colors, bars):
            bar.set_facecolor(cmap(c))
            bar.set_clip_on(False)

    def _update_separator(self, offset):
        """
        Compute colorbar offset and update separator line
        if map is non-zero.
        """
        offset_line = self.handles['offset_line']
        if offset == 0:
            offset_line.set_visible(False)
        else:
            offset_line.set_visible(True)
            if self.invert_axes:
                offset_line.set_xdata(offset)
            else:
                offset_line.set_ydata(offset)
class PointPlot(ChartPlot, ColorbarPlot):
    """
    Note that the 'cmap', 'vmin' and 'vmax' style arguments control
    how point magnitudes are rendered to different colors.
    """

    show_grid = param.Boolean(default=False, doc="""
      Whether to draw grid lines at the tick positions.""")

    # Deprecated parameters

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
        Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")

    size_index = param.ClassSelector(default=None, class_=(basestring, int),
                                     allow_None=True, doc="""
        Deprecated in favor of size style mapping, e.g. `size=dim('size')`""")

    scaling_method = param.ObjectSelector(default="area",
                                          objects=["width", "area"],
                                          doc="""
        Deprecated in favor of size style mapping, e.g.
        size=dim('size')**2.""")

    scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
        Scaling factor which is applied to either the width or area
        of each point, depending on the value of `scaling_method`.""")

    size_fn = param.Callable(default=np.abs, doc="""
        Function applied to size values before applying scaling,
        to remove values lower than zero.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'size', 'visible',
                  'cmap', 'vmin', 'vmax', 'norm']

    _nonvectorized_styles = ['alpha', 'marker', 'cmap', 'vmin', 'vmax',
                             'norm', 'visible']

    _disabled_opts = ['size']

    _plot_methods = dict(single='scatter')

    def get_data(self, element, ranges, style):
        """Return the scatter coordinates and processed style options."""
        xs, ys = (element.dimension_values(i) for i in range(2))
        self._compute_styles(element, ranges, style)
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        return (ys, xs) if self.invert_axes else (xs, ys), style, {}

    def _compute_styles(self, element, ranges, style):
        """
        Apply the deprecated color_index/size_index options, mapping
        dimension values to the scatter 'c' and 's' style keywords.
        Mutates the supplied style dict in place.
        """
        cdim = element.get_dimension(self.color_index)
        color = style.pop('color', None)
        cmap = style.get('cmap', None)

        if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
            self.param.warning(
                "Cannot declare style mapping for 'color' option and "
                "declare a color_index; ignoring the color_index.")
            cdim = None
        if cdim and cmap:
            cs = element.dimension_values(self.color_index)
            # Check if numeric otherwise treat as categorical
            if cs.dtype.kind in 'uif':
                style['c'] = cs
            else:
                style['c'] = search_indices(cs, unique_array(cs))
            self._norm_kwargs(element, ranges, style, cdim)
        elif color is not None:
            style['color'] = color
        style['edgecolors'] = style.pop('edgecolors', style.pop('edgecolor', 'none'))

        ms = style.get('s', mpl.rcParams['lines.markersize'])
        sdim = element.get_dimension(self.size_index)
        if sdim and ((isinstance(ms, basestring) and ms in element) or isinstance(ms, dim)):
            self.param.warning(
                "Cannot declare style mapping for 's' option and "
                "declare a size_index; ignoring the size_index.")
            sdim = None
        if sdim:
            sizes = element.dimension_values(self.size_index)
            sizes = compute_sizes(sizes, self.size_fn, self.scaling_factor,
                                  self.scaling_method, ms)
            if sizes is None:
                eltype = type(element).__name__
                self.param.warning(
                    '%s dimension is not numeric, cannot use to '
                    'scale %s size.' % (sdim.pprint_label, eltype))
            else:
                style['s'] = sizes
        style['edgecolors'] = style.pop('edgecolors', 'none')

    def update_handles(self, key, axis, element, ranges, style):
        """Update the scatter PathCollection in place from the element."""
        paths = self.handles['artist']
        (xs, ys), style, _ = self.get_data(element, ranges, style)
        paths.set_offsets(np.column_stack([xs, ys]))
        if 's' in style:
            sizes = style['s']
            if isscalar(sizes):
                sizes = [sizes]
            paths.set_sizes(sizes)
        if 'vmin' in style:
            paths.set_clim((style['vmin'], style['vmax']))
        if 'c' in style:
            paths.set_array(style['c'])
        if 'norm' in style:
            paths.norm = style['norm']
        if 'linewidth' in style:
            paths.set_linewidths(style['linewidth'])
        if 'edgecolors' in style:
            paths.set_edgecolors(style['edgecolors'])
        if 'facecolors' in style:
            # Fixed copy-paste bug: this previously called
            # set_edgecolors, clobbering the edge colors instead of
            # applying the face colors.
            paths.set_facecolors(style['facecolors'])
class VectorFieldPlot(ColorbarPlot):
    """
    Renders vector fields in sheet coordinates. The vectors are
    expressed in polar coordinates and may be displayed according to
    angle alone (with some common, arbitrary arrow length) or may be
    true polar vectors.

    The color or magnitude can be mapped onto any dimension using the
    color_index and size_index.

    The length of the arrows is controlled by the 'scale' style
    option. The scaling of the arrows may also be controlled via the
    normalize_lengths and rescale_lengths plot option, which will
    normalize the lengths to a maximum of 1 and scale them according
    to the minimum distance respectively.
    """

    arrow_heads = param.Boolean(default=True, doc="""
       Whether or not to draw arrow heads. If arrowheads are enabled,
       they may be customized with the 'headlength' and
       'headaxislength' style options.""")

    magnitude = param.ClassSelector(class_=(basestring, dim), doc="""
        Dimension or dimension value transform that declares the magnitude
        of each vector. Magnitude is expected to be scaled between 0-1,
        by default the magnitudes are rescaled relative to the minimum
        distance between vectors, this can be disabled with the
        rescale_lengths option.""")

    rescale_lengths = param.Boolean(default=True, doc="""
       Whether the lengths will be rescaled to take into account the
       smallest non-zero distance between two vectors.""")

    # Deprecated parameters

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
        Deprecated in favor of dimension value transform on color option,
        e.g. `color=dim('Magnitude')`.
        """)

    size_index = param.ClassSelector(default=None, class_=(basestring, int),
                                     allow_None=True, doc="""
        Deprecated in favor of the magnitude option, e.g.
        `magnitude=dim('Magnitude')`.
        """)

    normalize_lengths = param.Boolean(default=True, doc="""
        Deprecated in favor of rescaling length using dimension value
        transforms using the magnitude option, e.g.
        `dim('Magnitude').norm()`.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'visible', 'cmap',
                  'scale', 'headlength', 'headaxislength', 'pivot',
                  'width', 'headwidth', 'norm']

    _nonvectorized_styles = ['alpha', 'marker', 'cmap', 'visible', 'norm',
                             'pivot', 'headlength', 'headaxislength',
                             'headwidth']

    _plot_methods = dict(single='quiver')

    def _get_magnitudes(self, element, style, ranges):
        """
        Resolve the per-vector magnitudes from either the magnitude
        option or the deprecated size_index, normalizing them by the
        maximum magnitude when normalize_lengths is enabled. Defaults
        to unit lengths when no magnitude is declared.
        """
        size_dim = element.get_dimension(self.size_index)
        mag_dim = self.magnitude
        if size_dim and mag_dim:
            self.param.warning(
                "Cannot declare style mapping for 'magnitude' option "
                "and declare a size_index; ignoring the size_index.")
        elif size_dim:
            mag_dim = size_dim
        elif isinstance(mag_dim, basestring):
            mag_dim = element.get_dimension(mag_dim)
        if mag_dim is not None:
            if isinstance(mag_dim, dim):
                magnitudes = mag_dim.apply(element, flat=True)
            else:
                magnitudes = element.dimension_values(mag_dim)
                _, max_magnitude = ranges[dimension_name(mag_dim)]['combined']
                if self.normalize_lengths and max_magnitude != 0:
                    magnitudes = magnitudes / max_magnitude
        else:
            magnitudes = np.ones(len(element))
        return magnitudes

    def get_data(self, element, ranges, style):
        """
        Compute the quiver arguments (x, y, magnitude, zeros) and the
        style options (angles, scale, color mapping) for the element.
        """
        # Compute coordinates
        xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
        xs = element.dimension_values(xidx) if len(element.data) else []
        ys = element.dimension_values(yidx) if len(element.data) else []

        # Compute vector angle and magnitude
        radians = element.dimension_values(2) if len(element.data) else []
        # Rotate angles by 270 degrees when the axes are inverted.
        if self.invert_axes: radians = radians+1.5*np.pi
        angles = list(np.rad2deg(radians))
        magnitudes = self._get_magnitudes(element, style, ranges)
        input_scale = style.pop('scale', 1.0)
        if self.rescale_lengths:
            min_dist = get_min_distance(element)
            input_scale = input_scale / min_dist

        args = (xs, ys, magnitudes,  [0.0] * len(element))

        # Compute color
        cdim = element.get_dimension(self.color_index)
        color = style.get('color', None)
        if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
            self.param.warning(
                "Cannot declare style mapping for 'color' option and "
                "declare a color_index; ignoring the color_index.")
            cdim = None
        if cdim:
            colors = element.dimension_values(self.color_index)
            style['c'] = colors
            cdim = element.get_dimension(self.color_index)
            self._norm_kwargs(element, ranges, style, cdim)
            style.pop('color', None)

        # Process style
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        style.update(dict(scale=input_scale, angles=angles, units='x', scale_units='x'))
        # Translate generic style keys to the quiver-specific keywords.
        if 'vmin' in style:
            style['clim'] = (style.pop('vmin'), style.pop('vmax'))
        if 'c' in style:
            style['array'] = style.pop('c')
        if 'pivot' not in style:
            style['pivot'] = 'mid'
        if not self.arrow_heads:
            style['headaxislength'] = 0
        return args, style, {}

    def update_handles(self, key, axis, element, ranges, style):
        """
        Update the existing Quiver artist in place with new offsets,
        magnitudes, angles and colors.
        """
        args, style, axis_kwargs = self.get_data(element, ranges, style)

        # Set magnitudes, angles and colors if supplied.
        quiver = self.handles['artist']
        quiver.set_offsets(np.column_stack(args[:2]))
        quiver.U = args[2]
        quiver.angles = style['angles']
        if 'color' in style:
            quiver.set_facecolors(style['color'])
            quiver.set_edgecolors(style['color'])
        if 'array' in style:
            quiver.set_array(style['array'])
        if 'clim' in style:
            quiver.set_clim(style['clim'])
        if 'linewidth' in style:
            quiver.set_linewidths(style['linewidth'])
        return axis_kwargs
class BarPlot(LegendPlot):
    """
    Draws Bars elements as grouped or stacked matplotlib bar charts,
    laying the group/category/stack dimensions out along the x-axis.
    """

    padding = param.Number(default=0.2, doc="""
       Defines the padding between groups.""")

    show_legend = param.Boolean(default=True, doc="""
        Whether to show legend for the plot.""")

    stacked = param.Boolean(default=False, doc="""
       Whether the bars should be stacked or grouped.""")

    xticks = param.Integer(0, precedence=-1)

    # Deprecated parameters

    color_by = param.List(default=['category'], doc="""
       Defines how the Bar elements colored. Valid options include
       any permutation of 'group', 'category' and 'stack'.""")

    group_index = param.Integer(default=0, doc="""
       Index of the dimension in the supplied Bars
       Element, which will be laid out into groups.""")

    category_index = param.Integer(default=1, doc="""
       Index of the dimension in the supplied Bars
       Element, which will be laid out into categories.""")

    stack_index = param.Integer(default=2, doc="""
       Index of the dimension in the supplied Bars
       Element, which will stacked.""")

    style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
                  'log', 'facecolor', 'capsize', 'error_kw', 'hatch']

    _nonvectorized_styles = style_opts

    legend_specs = dict(LegendPlot.legend_specs, **{
        'top':    dict(bbox_to_anchor=(0., 1.02, 1., .102),
                       ncol=3, loc=3, mode="expand", borderaxespad=0.),
        'bottom': dict(ncol=3, mode="expand", loc=2,
                       bbox_to_anchor=(0., -0.4, 1., .102),
                       borderaxespad=0.1)})

    _dimensions = OrderedDict([('group', 0),
                               ('category',1),
                               ('stack',2)])

    def __init__(self, element, **params):
        super(BarPlot, self).__init__(element, **params)
        self.values, self.bar_dimensions = self._get_values()

    def _get_values(self):
        """
        Get unique index value for each bar
        """
        (gi, _), (ci, _), (si, _) = self._get_dims(self.hmap.last)
        ndims = self.hmap.last.ndims
        dims = self.hmap.last.kdims
        dimensions = []
        values = {}
        for vidx, vtype in zip([gi, ci, si], self._dimensions):
            if vidx < ndims:
                dim = dims[vidx]
                dimensions.append(dim)
                vals = self.hmap.dimension_values(dim.name)
            else:
                dimensions.append(None)
                vals = [None]
            values[vtype] = list(unique_iterator(vals))
        return values, dimensions

    def _compute_styles(self, element, style_groups):
        """
        Computes color and hatch combinations by
        any combination of the 'group', 'category'
        and 'stack'.
        """
        style = self.lookup_options(element, 'style')[0]
        sopts = []
        for sopt in ['color', 'hatch']:
            if sopt in style:
                sopts.append(sopt)
                style.pop(sopt, None)
        color_groups = []
        for sg in style_groups:
            color_groups.append(self.values[sg])
        style_product = list(product(*color_groups))
        wrapped_style = self.lookup_options(element, 'style').max_cycles(len(style_product))
        color_groups = {k:tuple(wrapped_style[n][sopt] for sopt in sopts)
                        for n,k in enumerate(style_product)}
        return style, color_groups, sopts

    def get_extents(self, element, ranges, range_type='combined'):
        ngroups = len(self.values['group'])
        vdim = element.vdims[0].name
        if self.stacked or self.stack_index == 1:
            # Stacked bar heights are only known after drawing, so the
            # top extent is left undefined. np.nan is used instead of
            # the np.NaN alias removed in NumPy 2.0.
            return 0, 0, ngroups, np.nan
        else:
            vrange = ranges[vdim]['combined']
            return 0, np.nanmin([vrange[0], 0]), ngroups, vrange[1]

    @mpl_rc_context
    def initialize_plot(self, ranges=None):
        element = self.hmap.last
        vdim = element.vdims[0]
        axis = self.handles['axis']
        key = self.keys[-1]

        ranges = self.compute_ranges(self.hmap, key, ranges)
        ranges = match_spec(element, ranges)

        self.handles['artist'], self.handles['xticks'], xdims = self._create_bars(axis, element)
        return self._finalize_axis(key, ranges=ranges, xticks=self.handles['xticks'],
                                   element=element, dimensions=[xdims, vdim])

    def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
        """
        Apply ticks with appropriate offsets.
        """
        yalignments = None
        if xticks is not None:
            ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0]))
            xticks = (list(ticks), list(labels))
        super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
        if yalignments:
            for t, y in zip(axis.get_xticklabels(), yalignments):
                t.set_y(y)

    def _get_dims(self, element):
        """
        Resolve the (index, dimension) pairs for the group, category
        and stack dimensions; missing dimensions get an out-of-range
        index (ndims+1) and a None dimension.
        """
        ndims = len(element.dimensions())
        if element.ndims < 2:
            gdim, cdim, sdim = element.kdims[0], None, None
            gi, ci, si = 0, ndims+1, ndims+1
        elif element.ndims == 3:
            gdim, cdim, sdim = element.kdims
            gi, ci, si = 0, 1, 2
        elif self.stacked or self.stack_index == 1:
            gdim, cdim, sdim = element.kdims[0], None, element.kdims[1]
            gi, ci, si = 0, ndims+1, 1
        else:
            gdim, cdim, sdim = element.kdims[0], element.kdims[1], None
            gi, ci, si = 0, 1, ndims+1
        return (gi, gdim), (ci, cdim), (si, sdim)

    def _create_bars(self, axis, element):
        """
        Draw one bar per (group, category, stack) combination, compute
        the tick positions/labels and optionally add a legend.
        """
        # Get style and dimension information
        values = self.values
        if self.group_index != 0:
            self.warning('Bars group_index plot option is deprecated '
                         'and will be ignored, set stacked=True/False '
                         'instead.')
        if self.category_index != 1:
            self.warning('Bars category_index plot option is deprecated '
                         'and will be ignored, set stacked=True/False '
                         'instead.')
        if self.stack_index != 2 and not (self.stack_index == 1 and not self.stacked):
            self.warning('Bars stack_index plot option is deprecated '
                         'and will be ignored, set stacked=True/False '
                         'instead.')
        if self.color_by != ['category']:
            self.warning('Bars color_by plot option is deprecated '
                         'and will be ignored, in future it will '
                         'support color style mapping by dimension.')

        (gi, gdim), (ci, cdim), (si, sdim) = self._get_dims(element)
        indices = dict(zip(self._dimensions, (gi, ci, si)))
        color_by = ['category'] if cdim else ['stack']
        style_groups = [sg for sg in color_by if indices[sg] < element.ndims]
        style_opts, color_groups, sopts = self._compute_styles(element, style_groups)
        dims = element.dimensions('key', label=True)
        ndims = len(dims)
        xdims = [d for d in [cdim, gdim] if d is not None]

        # Compute widths
        width = (1-(2.*self.padding)) / len(values['category'])

        # Initialize variables
        xticks = []
        val_key = [None] * ndims
        style_key = [None] * len(style_groups)
        label_key = [None] * len(style_groups)
        labels = []
        bars = {}

        # Iterate over group, category and stack dimension values
        # computing xticks and drawing bars and applying styles
        for gidx, grp_name in enumerate(values['group']):
            if grp_name is not None:
                grp = gdim.pprint_value(grp_name)
                if 'group' in style_groups:
                    idx = style_groups.index('group')
                    label_key[idx] = str(grp)
                    style_key[idx] = grp_name
                val_key[gi] = grp_name
                if ci < ndims:
                    yalign = -0.04
                else:
                    yalign = 0
                xticks.append((gidx+0.5, grp, yalign))
            for cidx, cat_name in enumerate(values['category']):
                xpos = gidx+self.padding+(cidx*width)
                if cat_name is not None:
                    # Format with the category dimension (fixed: was
                    # mistakenly using the group dimension's formatter).
                    cat = cdim.pprint_value(cat_name)
                    if 'category' in style_groups:
                        idx = style_groups.index('category')
                        label_key[idx] = str(cat)
                        style_key[idx] = cat_name
                    val_key[ci] = cat_name
                    xticks.append((xpos+width/2., cat, 0))
                prev = 0
                for stk_name in values['stack']:
                    if stk_name is not None:
                        if 'stack' in style_groups:
                            idx = style_groups.index('stack')
                            # Format with the stack dimension (fixed: was
                            # mistakenly using the group dimension's formatter).
                            stk = sdim.pprint_value(stk_name)
                            label_key[idx] = str(stk)
                            style_key[idx] = stk_name
                        val_key[si] = stk_name
                    vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
                    val = float(vals[0]) if len(vals) else np.nan
                    label = ', '.join(label_key)
                    style = dict(style_opts, label='' if label in labels else label,
                                 **dict(zip(sopts, color_groups[tuple(style_key)])))
                    with abbreviated_exception():
                        style = self._apply_transforms(element, {}, style)
                    bar = axis.bar([xpos+width/2.], [val], width=width, bottom=prev,
                                   **style)

                    # Update variables
                    bars[tuple(val_key)] = bar
                    prev += val if isfinite(val) else 0
                    labels.append(label)
        title = [element.kdims[indices[cg]].pprint_label
                 for cg in color_by if indices[cg] < ndims]

        if self.show_legend and any(len(l) for l in labels) and color_by != ['category']:
            leg_spec = self.legend_specs[self.legend_position]
            if self.legend_cols: leg_spec['ncol'] = self.legend_cols
            axis.legend(title=', '.join(title), **leg_spec)
        return bars, xticks, xdims

    def update_handles(self, key, axis, element, ranges, style):
        """
        Update the height and stacking offset of each existing bar
        artist from the supplied element.
        """
        dims = element.dimensions('key', label=True)
        ndims = len(dims)
        (gi, _), (ci, _), (si, _) = self._get_dims(element)
        val_key = [None] * ndims
        for g in self.values['group']:
            if g is not None: val_key[gi] = g
            for c in self.values['category']:
                if c is not None: val_key[ci] = c
                prev = 0
                for s in self.values['stack']:
                    if s is not None: val_key[si] = s
                    bar = self.handles['artist'].get(tuple(val_key))
                    if bar:
                        vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
                        height = float(vals[0]) if len(vals) else np.nan
                        bar[0].set_height(height)
                        bar[0].set_y(prev)
                        prev += height if isfinite(height) else 0
        return {'xticks': self.handles['xticks']}
class SpikesPlot(PathPlot, ColorbarPlot):
aspect = param.Parameter(default='square', doc="""
The aspect ratio mode of the plot. Allows setting an
explicit aspect ratio as width/height as well as
'square' and 'equal' options.""")
color_index = param.ClassSelector(default=None, allow_None=True,
class_=(basestring, int), doc="""
Index of the dimension from which the color will the drawn""")
spike_length = param.Number(default=0.1, doc="""
The length of each spike if Spikes object is one dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
style_opts = PathPlot.style_opts + ['cmap']
def init_artists(self, ax, plot_args, plot_kwargs):
if 'c' in plot_kwargs:
plot_kwargs['array'] = plot_kwargs.pop('c')
if 'vmin' in plot_kwargs and 'vmax' in plot_kwargs:
plot_kwargs['clim'] = plot_kwargs.pop('vmin'), plot_kwargs.pop('vmax')
line_segments = LineCollection(*plot_args, **plot_kwargs)
ax.add_collection(line_segments)
return {'artist': line_segments}
def get_extents(self, element, ranges, range_type='combined'):
if len(element.dimensions()) > 1:
ydim = element.get_dimension(1)
s0, s1 = ranges[ydim.name]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[ydim.name]['soft'] = (s0, s1)
l, b, r, t = super(SpikesPlot, self).get_extents(element, ranges, range_type)
if len(element.dimensions()) == 1 and range_type != 'hard':
if self.batched:
bs, ts = [], []
# Iterate over current NdOverlay and compute extents
# from position and length plot options
frame = self.current_frame or self.hmap.last
for el in frame.values():
opts = self.lookup_options(el, 'plot').options
pos = opts.get('position', self.position)
length = opts.get('spike_length', self.spike_length)
bs.append(pos)
ts.append(pos+length)
b, t = (np.nanmin(bs), np.nanmax(ts))
else:
b, t = self.position, self.position+self.spike_length
return l, b, r, t
def get_data(self, element, ranges, style):
dimensions = element.dimensions(label=True)
ndims = len(dimensions)
pos = self.position
if ndims > 1:
data = [[(x, pos), (x, pos+y)] for x, y in element.array([0, 1])]
else:
height = self.spike_length
data = [[(x[0], pos), (x[0], pos+height)] for x in element.array([0])]
if self.invert_axes:
data = [(line[0][::-1], line[1][::-1]) for line in data]
dims = element.dimensions()
clean_spikes = []
for spike in data:
xs, ys = zip(*spike)
cols = []
for i, vs in enumerate((xs, ys)):
vs = np.array(vs)
if (vs.dtype.kind == 'M' or (len(vs) and isinstance(vs[0], datetime_types))) and i < len(dims):
dt_format = Dimension.type_formatters[np.datetime64]
dims[i] = dims[i](value_format=DateFormatter(dt_format))
vs = np.array([dt_to_int(v, 'D') for v in vs])
cols.append(vs)
clean_spikes.append(np.column_stack(cols))
cdim = element.get_dimension(self.color_index)
color = style.get('color', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim:
style['array'] = element.dimension_values(cdim)
self._norm_kwargs(element, ranges, style, cdim)
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
return (clean_spikes,), style, {'dimensions': dims}
def update_handles(self, key, axis, element, ranges, style):
    """Recompute the spike segments and push them onto the existing
    LineCollection artist, returning the axis keyword arguments."""
    (paths,), opts, axis_kwargs = self.get_data(element, ranges, style)
    collection = self.handles['artist']
    collection.set_paths(paths)
    collection.set_visible(style.get('visible', True))
    # Only touch artist properties that get_data actually supplied
    if 'color' in opts:
        collection.set_edgecolors(opts['color'])
    has_values = 'array' in opts or 'c' in opts
    if has_values:
        collection.set_array(opts.get('array', opts.get('c')))
    if 'vmin' in opts:
        collection.set_clim((opts['vmin'], opts['vmax']))
    if 'norm' in opts:
        collection.norm = opts['norm']
    if 'linewidth' in opts:
        collection.set_linewidths(opts['linewidth'])
    return axis_kwargs
class SideSpikesPlot(AdjoinedPlot, SpikesPlot):
    """SpikesPlot variant for rendering a Spikes element adjoined to
    another plot: overrides defaults so the subplot appears as a
    compact strip with a transparent background and bare axes."""

    # Transparent RGBA tuple so the adjoined strip shows no background
    bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
        Make plot background invisible.""")

    border_size = param.Number(default=0, doc="""
        The size of the border expressed as a fraction of the main plot.""")

    subplot_size = param.Number(default=0.1, doc="""
        The size subplots as expressed as a fraction of the main plot.""")

    spike_length = param.Number(default=1, doc="""
        The length of each spike if Spikes object is one dimensional.""")

    xaxis = param.ObjectSelector(default='bare',
                                 objects=['top', 'bottom', 'bare', 'top-bare',
                                          'bottom-bare', None], doc="""
        Whether and where to display the xaxis, bare options allow suppressing
        all axis labels including ticks and xlabel. Valid options are 'top',
        'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")

    yaxis = param.ObjectSelector(default='bare',
                                 objects=['left', 'right', 'bare', 'left-bare',
                                          'right-bare', None], doc="""
        Whether and where to display the yaxis, bare options allow suppressing
        all axis labels including ticks and ylabel. Valid options are 'left',
        'right', 'bare' 'left-bare' and 'right-bare'.""")
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/linear_model/coordinate_descent.py | 8 | 76416 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned only when ``return_n_iter`` is set to True.)

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # A Lasso path is an elastic net path with a pure L1 penalty.
    # BUGFIX: return_n_iter must be forwarded explicitly -- previously it
    # was accepted but never passed on, so callers requesting iteration
    # counts silently received only (alphas, coefs, dual_gaps).
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided
        assuming they are handled by the caller when check_input=False.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
                             ensure_2d=False)

    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False)
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    # Solver options are read from **params with the documented defaults
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)

    # Fortran order is required by the Cython coordinate descent routines
    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    # Walk the path from the largest alpha downwards, warm-starting each
    # solve from the previous solution (coef_ is reused across iterations)
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython solver variant
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=np.float64,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        # eps_ is the duality-gap tolerance returned by the solver
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.

    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2 +
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # Path solver shared with enet_path/lasso_path; fit() delegates to it
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is initialized here so warm_start can test for a prior fit
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target

        check_input : boolean, default True
            If False, input validation is skipped; the caller must then
            supply float64 Fortran-ordered arrays.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)

        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            y = np.asarray(y, dtype=np.float64)
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
            y = check_array(y, dtype=np.float64, order='F', copy=False,
                            ensure_2d=False)

        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False)

        # Internally always work with 2D targets so single- and multi-target
        # cases share one code path
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_samples, n_features = X.shape
        n_targets = y.shape[1]

        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")

        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]

        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []

        # Fit one single-alpha path per target, reusing the class-level
        # path solver (enet_path) with validation disabled since the data
        # were already prepared above
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])

        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]

        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # Sparse input is handled here directly; dense input defers to the
        # LinearModel base implementation
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Reuses the elastic net path solver; l1_ratio is pinned to 1.0 below
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # The Lasso is the l1_ratio=1.0 special case of ElasticNet
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Fits the path on the training fold and evaluates the mean squared
    error of every model on the path against the test fold.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']

    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False

    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)

    # Copy before mutating: path_params is shared across CV folds
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas

    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio

    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Free the training fold before evaluating on the test fold
    del X_train, y_train

    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]

    if normalize:
        # Undo the normalization so predictions are in the original scale
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]

    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Average squared residuals over samples, then over targets, giving
    # one MSE per alpha on the path
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path.

    Subclasses (LassoCV, ElasticNetCV and their multi-task variants) supply
    a ``path`` static method that computes coefficients along a grid of
    alphas; ``fit`` cross-validates over that grid (and over ``l1_ratio``
    values when the estimator exposes one), picks the hyper-parameters with
    the lowest mean squared error, and refits a final model on all the data.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Plain parameter storage only (sklearn convention): validation is
        # deferred to fit so that get_params/set_params round-trip cleanly.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : object
            The fitted estimator (enables chained calls).
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # The presence of l1_ratio distinguishes the ElasticNet family from
        # the Lasso family; used below to build error messages and to pick
        # which estimator class to refit at the end.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            # Mono-task CV estimators: y must be a single column.
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            # Multi-task CV estimators: sparse X is unsupported and y must
            # be 2-D with one column per task.
            if sparse.isspmatrix(X):
                # NOTE: space added so the message reads "... was passed"
                # instead of the original "...waspassed".
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            # Non-array input: convert once here, so no further copy needed.
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            # Lasso is ElasticNet with l1_ratio fixed to 1.
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatically-scaled alpha grid per l1_ratio value.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        # Average over folds; the best (l1_ratio, alpha) pair minimizes it.
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            # Lasso-family estimators do not expose l1_ratio_.
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with built-in cross-validation over alpha.

    Fits Lasso models along a regularization path and keeps the alpha that
    minimizes the cross-validated mean squared error. The objective is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Path length: ``eps=1e-3`` means ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path.

    alphas : numpy array, optional
        Explicit grid of alphas; computed automatically when ``None``.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up computations;
        ``'auto'`` lets the estimator decide, and an explicit Gram matrix
        may be passed directly.

    max_iter : int, optional
        Maximum number of coordinate descent iterations.

    tol : float, optional
        Optimization tolerance: once updates fall below ``tol`` the dual
        gap is checked for optimality and iterated until it is below
        ``tol`` as well.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation strategy: ``None`` for the default 3-fold,
        an integer for that many folds (:class:`KFold`), or a generator /
        iterable of train/test splits. See the
        :ref:`User Guide <cross_validation>` for all options.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs used for the cross-validation loop; ``-1`` uses all.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive.

    selection : str, default 'cyclic'
        With 'random', a random coefficient is updated every iteration
        instead of cycling over features; often converges noticeably
        faster when tol is above 1e-4.

    random_state : int, RandomState instance, or None (default)
        Seed of the generator picking coefficients when ``selection`` is
        'random'.

    fit_intercept : boolean, default True
        Whether to estimate the intercept; set to False for pre-centered
        data.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    mse_path_ : array, shape (n_alphas, n_folds)
        Test-fold mean squared error for every alpha.

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for ``alpha_``.

    n_iter_ : int
        Iterations run by the coordinate descent solver for the optimal
        alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Regularization-path routine used by LinearModelCV.fit.
    path = staticmethod(lasso_path)
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Every argument is forwarded verbatim to the LinearModelCV base.
        super(LassoCV, self).__init__(
            eps=eps,
            n_alphas=n_alphas,
            alphas=alphas,
            fit_intercept=fit_intercept,
            normalize=normalize,
            precompute=precompute,
            max_iter=max_iter,
            tol=tol,
            copy_X=copy_X,
            cv=cv,
            verbose=verbose,
            n_jobs=n_jobs,
            positive=positive,
            random_state=random_state,
            selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with built-in cross-validation.

    Fits Elastic Net models along a regularization path and keeps the
    (alpha, l1_ratio) pair with the lowest cross-validated mean squared
    error.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        Mixing parameter between L1 and L2, in [0, 1]: 0 gives a pure L2
        penalty, 1 a pure L1 penalty, and intermediate values a blend.
        A list of values is cross-validated and the best one kept; a good
        list typically puts more values near 1 (Lasso-like) than near 0
        (Ridge-like), e.g. ``[.1, .5, .7, .9, .95, .99, 1]``.

    eps : float, optional
        Path length: ``eps=1e-3`` means ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, per l1_ratio.

    alphas : numpy array, optional
        Explicit grid of alphas; computed automatically when ``None``.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up computations;
        ``'auto'`` lets the estimator decide, and an explicit Gram matrix
        may be passed directly.

    max_iter : int, optional
        Maximum number of coordinate descent iterations.

    tol : float, optional
        Optimization tolerance: once updates fall below ``tol`` the dual
        gap is checked for optimality and iterated until it is below
        ``tol`` as well.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation strategy: ``None`` for the default 3-fold,
        an integer for that many folds (:class:`KFold`), or a generator /
        iterable of train/test splits. See the
        :ref:`User Guide <cross_validation>` for all options.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs used for the cross-validation loop; ``-1`` uses all.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        With 'random', a random coefficient is updated every iteration
        instead of cycling over features; often converges noticeably
        faster when tol is above 1e-4.

    random_state : int, RandomState instance, or None (default)
        Seed of the generator picking coefficients when ``selection`` is
        'random'.

    fit_intercept : boolean
        Whether to estimate the intercept; set to False for pre-centered
        data.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.

    l1_ratio_ : float
        The L1/L2 compromise chosen by cross validation.

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Test-fold mean squared error for every l1_ratio/alpha pair.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        Iterations run by the coordinate descent solver for the optimal
        alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit
    method should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Regularization-path routine used by LinearModelCV.fit.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Penalty / path configuration.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Data preprocessing options.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        # Solver configuration.
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.positive = positive
        self.selection = selection
        self.random_state = random_state
        # Cross-validation configuration.
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    # Raw docstring: \sum and \sqrt would otherwise be invalid escape
    # sequences in a plain string literal.
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        # None signals "no previous solution" to the warm-start logic in fit.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Returns
        -------
        self : object
            The fitted estimator.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = check_array(y, dtype=np.float64, ensure_2d=False)
        # Both MultiTaskElasticNet and MultiTaskLasso set l1_ratio in
        # __init__, so the original hasattr(self, 'l1_ratio') test always
        # chose 'ElasticNet' and produced a misleading error message for
        # MultiTaskLasso; dispatch on the concrete class instead.
        # (MultiTaskLasso is defined later in this module; the name is
        # resolved at call time, not at class-definition time.)
        if isinstance(self, MultiTaskLasso):
            model_str = 'Lasso'
        else:
            model_str = 'ElasticNet'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # Scale the penalty by n_samples, matching the objective above.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    # Raw docstring: \sum and \sqrt would otherwise be invalid escape
    # sequences in a plain string literal.
    r"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)

    intercept_ : array, shape (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        # None signals "no previous solution" to the warm-start logic in fit.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Lasso is ElasticNet with a pure L1 mixing ratio; the inherited
        # MultiTaskElasticNet.fit reads this attribute.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    # Raw docstring: \sum and \sqrt would otherwise be invalid escape
    # sequences in a plain string literal.
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds) or \
                (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Regularization-path routine used by LinearModelCV.fit.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Plain parameter storage only, per sklearn estimator convention.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    # Raw docstring: \sum and \sqrt would otherwise be invalid escape
    # sequences in a plain string literal.
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Regularization-path routine used by LinearModelCV.fit.
    path = staticmethod(lasso_path)
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # Everything is forwarded verbatim to the LinearModelCV base class;
        # the Lasso variant has no l1_ratio and no precompute/positive.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
| bsd-3-clause |
Mogeng/IOHMM | tests/test_OLS.py | 2 | 33960 | from __future__ import print_function
from __future__ import division
# import json
from past.utils import old_div
import unittest
import numpy as np
import statsmodels.api as sm
from IOHMM import OLS
# //TODO sample weight all zero
# Corner cases
# General
# 1. sample_weight is all zero
# 2. sample_weight is all one
# 3. sample_weight is a scale of all one
# 4. sample_weight is mixed of 0 and 1
# 6. when the number of data points is 1 or very small (less than the number of features)
# 7. standard dataset compare with sklearn/statsmodels
# 8. output dimensions
# 9. collinearity in X
# 10. to/from json
# MultivariateOLS
# 1. Y is not column/row independent
# Discrete/CrossEntropyMNL
# 1. number of class is 1
# 2. number of class is 2
class UnivariateOLSTests(unittest.TestCase):
    """Tests for OLS with a univariate (single-column) response.

    Uses the Longley macroeconomic dataset; the reference coefficient,
    standard-error and dispersion values are the published results for
    this dataset (std.err/dispersion re-calibrated by df_resid).
    """

    @classmethod
    def setUpClass(cls):
        # Load the Longley dataset once for the whole class.
        cls.data_longley = sm.datasets.longley.load()

    def test_ols(self):
        """Unregularized pinv fit matches the reference Longley solution."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog)
        # coefficient
        self.assertEqual(self.model.coef.shape, (1, 7))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                      -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                      1829.15146461355]).reshape(1, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (1, 7))
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array([890420.383607373, 84.9149257747669, 0.03349,
                      0.488399681651699, 0.214274163161675, 0.226073200069370,
                      455.478499142212]).reshape(1, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (1, 1))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[92936.0061673238]]),
            decimal=3)
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array([267.34003, -94.01394, 46.28717, -410.11462,
                      309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                      -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                      -206.75783]).reshape(-1, 1),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -109.61743480849013,
            places=3)
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/OLS/UnivariateOLS/')
        self.assertEqual(json_dict['properties']['solver'], 'pinv')
        # from_json round-trip must preserve the fitted parameters
        self.model_from_json = OLS.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_from_json.coef,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_from_json.stderr,
            decimal=3)
        self.assertEqual(
            self.model.dispersion,
            self.model_from_json.dispersion)

    def test_ols_l1_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_l2_regularized(self):
        """l2 (ridge) fit with a scalar sample weight."""
        # there is a bug in sklearn with weights, it can only use list right now
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                      -0.57305292, -0.41272483, 48.32484052]).reshape(1, -1),
            decimal=3)
        # std.err of coefficient is not estimated for regularized fits
        self.assertIsNone(self.model.stderr)
        # scale
        self.assertEqual(self.model.dispersion.shape, (1, 1))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[250870.081]]),
            decimal=3)
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array([[280.31871146],
                      [-131.6981265],
                      [90.64414685],
                      [-400.10244445],
                      [-440.59604167],
                      [-543.88595187],
                      [200.70483416],
                      [215.88629903],
                      [74.9456573],
                      [913.85128645],
                      [424.15996133],
                      [-9.5797488],
                      [-360.96841852],
                      [27.214226],
                      [150.87705909],
                      [-492.17489392]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -117.561627187,
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
            (16, ))

    def test_ols_elastic_net_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_sample_weight_all_half(self):
        """A constant weight of 0.5 must give the same fit as no weights."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                      -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                      1829.15146461355)).reshape(1, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                      0.488399681651699, 0.214274163161675, 0.226073200069370,
                      455.478499142212)).reshape(1, -1),
            decimal=1)
        # scale
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array((92936.0061673238)))
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array((267.34003, -94.01394, 46.28717, -410.11462,
                      309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                      -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                      -206.75783)).reshape(-1, 1),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -109.61743480849013,
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
            (16, ))

    def test_ols_sample_weight_all_zero(self):
        """All-zero weights carry no information and must raise."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit,
                          self.data_longley.exog, self.data_longley.endog, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        """0/1 weights must be equivalent to fitting on the weighted subset."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 8
        self.model.fit(self.data_longley.exog, self.data_longley.endog,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.data_longley.exog.shape[0] - len_half)))
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.data_longley.exog[:len_half], self.data_longley.endog[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        """A single data point fits exactly: zero dispersion, loglike 0/-inf."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog[0:1, :],
                       self.data_longley.endog[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (1, 7))
        # scale
        np.testing.assert_array_almost_equal(self.model.dispersion, np.array([[0]]))
        # loglike_per_sample: exact match -> 0, any mismatch -> -inf
        # (np.inf instead of the np.Infinity alias removed in NumPy 2.0)
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.data_longley.exog[0:1, :], self.data_longley.endog[0:1, ]), np.array([0]))
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.data_longley.exog[0:1, :].tolist() * 6),
            np.array([60323, 0, 60323, 60322, 60322, 60323])),
            np.array([0, -np.inf, 0, -np.inf, -np.inf, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        """Duplicated columns: coefficients split evenly, stderr unavailable."""
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.data_longley.exog[:, 0:1], self.data_longley.exog[:, 0:1]])
        self.model_col.fit(X,
                           self.data_longley.endog, sample_weight=0.8)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog[:, 0:1],
                       self.data_longley.endog, sample_weight=0.8)
        # coef
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([319.47969664, 319.47969664]).reshape(1, -1), decimal=3)
        # stderr cannot be estimated under perfect collinearity
        self.assertIsNone(self.model_col.stderr)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.data_longley.endog),
            self.model.loglike_per_sample(self.data_longley.exog[:, 0:1],
                                          self.data_longley.endog), decimal=3)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.data_longley.exog[:, 0:1]), decimal=3)
class IndependentMultivariateOLSTests(unittest.TestCase):
    """Tests for OLS with a 2-column response of independent Gaussian noise.

    X and Y are generated from a fixed seed, so the reference values below
    are deterministic.
    """

    @classmethod
    def setUpClass(cls):
        np.random.seed(0)
        cls.X = np.random.normal(size=(1000, 1))
        cls.Y = np.random.normal(size=(cls.X.shape[0], 2))

    def test_ols(self):
        """Unregularized pinv fit on random data, including JSON round-trip."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.02924966, -0.03484827],
                      [-0.00978688, 0.00336316]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([[0.03083908, 0.03121143],
                      [0.03002101, 0.03038348]]).reshape(2, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -2758.54387369,
            places=3)
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/OLS/MultivariateOLS/')
        self.assertEqual(json_dict['properties']['solver'], 'pinv')
        # from_json round-trip must preserve the fitted parameters
        self.model_from_json = OLS.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_from_json.coef,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_from_json.stderr,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_from_json.dispersion,
            decimal=3)

    def test_ols_l2_regularized(self):
        """l2 (ridge) fit; with tiny alpha the solution is close to OLS."""
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=1, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.0292465, -0.03484456],
                      [-0.00978591, 0.00336286]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient is not estimated for regularized fits
        self.assertIsNone(self.model.stderr)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -2758.5438737,
            places=3)

    def test_ols_l1_regularized(self):
        # sklearn l1 and elastic net does not support sample weight
        pass

    def test_ols_sample_weight_all_half(self):
        """A constant weight of 0.5 gives the same fit; loglike is halved."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.02924966, -0.03484827],
                      [-0.00978688, 0.00336316]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([[0.03083908, 0.03121143],
                      [0.03002101, 0.03038348]]).reshape(2, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y, 0.5),
            old_div(-2758.54387369, 2.),
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape,
            (1000, ))

    def test_ols_sample_weight_all_zero(self):
        """All-zero weights carry no information and must raise."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        """0/1 weights must be equivalent to fitting on the weighted subset."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 500
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.X.shape[0] - len_half)))
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        """A single data point fits exactly: zero dispersion, loglike 0/-inf."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[0:1, :],
                       self.Y[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (2, 2))
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
        # loglike_per_sample: exact match -> 0, any mismatch -> -inf
        # (np.inf instead of the np.Infinity alias removed in NumPy 2.0)
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.X[0:1, :].tolist() * 6),
            np.array([self.Y[0, ], self.Y[1, ], self.Y[0, ],
                      self.Y[1, ], self.Y[1, ], self.Y[0, ]])),
            np.array([0, -np.inf, 0, -np.inf, -np.inf, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        """Duplicated columns: fit is equivalent, stderr unavailable."""
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X,
                           self.Y, sample_weight=0.5)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1],
                       self.Y, sample_weight=0.5)
        # stderr cannot be estimated under perfect collinearity
        self.assertIsNone(self.model_col.stderr)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.Y),
            self.model.loglike_per_sample(self.X[:, 0:1],
                                          self.Y), decimal=0)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=1)
class PerfectCorrelationMultivariateOLSTests(unittest.TestCase):
    """Tests for OLS with a 2-column response whose columns are identical.

    The response stacks the Longley endog twice, so the response
    covariance is singular and loglike/loglike_per_sample must raise.
    """

    @classmethod
    def setUpClass(cls):
        np.random.seed(0)
        cls.data_longley = sm.datasets.longley.load()
        cls.X = cls.data_longley.exog
        cls.Y = np.hstack((cls.data_longley.endog.reshape(-1, 1),
                           cls.data_longley.endog.reshape(-1, 1)))

    def test_ols(self):
        """Both response columns reproduce the univariate Longley solution."""
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 7))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355],
                      [-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 7))
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[890420.383607373, 84.9149257747669, 0.03349,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212],
                      [890420.383607373, 84.9149257747669, 0.03349,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212]]).reshape(2, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[92936.0061673238, 92936.0061673238],
                      [92936.0061673238, 92936.0061673238]]),
            decimal=3)
        # predict
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((np.array([267.34003, -94.01394, 46.28717, -410.11462,
                                 309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                                 -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                                 -206.75783]).reshape(-1, 1),
                       np.array([267.34003, -94.01394, 46.28717, -410.11462,
                                 309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                                 -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                                 -206.75783]).reshape(-1, 1))),
            decimal=3)
        # loglike/_per_sample: singular response covariance -> error
        self.assertRaises(ValueError,
                          self.model.loglike_per_sample, self.X, self.Y)

    def test_ols_l1_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_l2_regularized(self):
        """l2 (ridge) fit; loglike still raises on the singular response."""
        # there is a bug in sklearn with weights, it can only use list right now
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                       -0.57305292, -0.41272483, 48.32484052],
                      [-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                       -0.57305292, -0.41272483, 48.32484052]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient is not estimated for regularized fits
        self.assertIsNone(self.model.stderr)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[250870.081, 250870.081],
                      [250870.081, 250870.081]]),
            decimal=3)
        # predict
        res = np.array([[280.31871146],
                        [-131.6981265],
                        [90.64414685],
                        [-400.10244445],
                        [-440.59604167],
                        [-543.88595187],
                        [200.70483416],
                        [215.88629903],
                        [74.9456573],
                        [913.85128645],
                        [424.15996133],
                        [-9.5797488],
                        [-360.96841852],
                        [27.214226],
                        [150.87705909],
                        [-492.17489392]])
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((res, res)),
            decimal=3)
        # loglike/_per_sample: singular response covariance -> error
        self.assertRaises(ValueError,
                          self.model.loglike, self.X, self.Y)

    def test_ols_elastic_net_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_sample_weight_all_half(self):
        """A constant weight of 0.5 must give the same fit as no weights."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array(((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355),
                      (-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355))).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array(((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212),
                      (890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212))).reshape(2, -1),
            decimal=1)
        # scale
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array(((92936.0061673238, 92936.0061673238),
                      (92936.0061673238, 92936.0061673238))),
            decimal=3)
        # predict
        res = np.array((267.34003, -94.01394, 46.28717, -410.11462,
                        309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                        -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                        -206.75783)).reshape(-1, 1)
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((res, res)),
            decimal=3)
        # loglike/_per_sample: singular response covariance -> error
        self.assertRaises(ValueError,
                          self.model.loglike, self.X, self.Y)

    def test_ols_sample_weight_all_zero(self):
        """All-zero weights carry no information and must raise."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        """0/1 weights must be equivalent to fitting on the weighted subset."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 8
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.data_longley.exog.shape[0] - len_half)))
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        """A single data point fits exactly: zero dispersion, loglike 0/-inf."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[0:1, :],
                       self.Y[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (2, 7))
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
        # loglike_per_sample: exact match -> 0, any mismatch -> -inf
        # (np.inf instead of the np.Infinity alias removed in NumPy 2.0)
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.X[0:1, :].tolist() * 6),
            np.array([[60323, 60323], [0, 60323], [60323, 60323],
                      [60322, 60323], [60322, 60322], [60323, 60323]])),
            np.array([0, -np.inf, 0, -np.inf, -np.inf, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        """Duplicated columns: coefficients split evenly, stderr unavailable."""
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X,
                           self.Y, sample_weight=0.8)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1],
                       self.Y, sample_weight=0.8)
        # coef
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([[319.47969664, 319.47969664],
                                           [319.47969664, 319.47969664]]).reshape(2, -1), decimal=3)
        # stderr cannot be estimated under perfect collinearity
        self.assertIsNone(self.model_col.stderr)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample: singular response covariance -> error
        self.assertRaises(ValueError,
                          self.model_col.loglike, X, self.Y)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=3)
| bsd-3-clause |
mdhaber/scipy | scipy/ndimage/interpolation.py | 12 | 35344 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import warnings
import numpy
from numpy.core.multiarray import normalize_axis_index
from scipy import special
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docfiller
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
                    mode='mirror'):
    """
    Calculate a 1-D spline filter along the given axis.

    Each line of the array taken along `axis` is passed through a
    B-spline prefilter. The order of the spline must be >= 2 and <= 5.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    axis : int, optional
        The axis along which the spline filter is applied. Default is the last
        axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter1d : ndarray
        The filtered input.

    Notes
    -----
    All functions in `ndimage.interpolation` do spline interpolation of
    the input image. If using B-splines of `order > 1`, the input image
    values have to be converted to B-spline coefficients first, which is
    done by applying this 1-D filter sequentially along all
    axes of the input. All functions that require B-spline coefficients
    will automatically filter their inputs, a behavior controllable with
    the `prefilter` keyword argument. For functions that accept a `mode`
    parameter, the result will only be correct if it matches the `mode`
    used when filtering.

    For complex-valued `input`, this function processes the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    See Also
    --------
    spline_filter : Multidimensional spline filter.

    Examples
    --------
    We can filter an image using 1-D spline along the given axis:

    >>> from scipy.ndimage import spline_filter1d
    >>> import matplotlib.pyplot as plt
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
    >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
    >>> f, ax = plt.subplots(1, 3, sharex=True)
    >>> for ind, data in enumerate([[orig_img, "original image"],
    ...             [sp_filter_axis_0, "spline filter (axis=0)"],
    ...             [sp_filter_axis_1, "spline filter (axis=1)"]]):
    ...     ax[ind].imshow(data[0], cmap='gray_r')
    ...     ax[ind].set_title(data[1])
    >>> plt.tight_layout()
    >>> plt.show()
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    is_complex = numpy.iscomplexobj(data)
    filtered = _ni_support._get_output(output, data,
                                       complex_output=is_complex)
    if is_complex:
        # Recurse: real and imaginary components are filtered separately.
        spline_filter1d(data.real, order, axis, filtered.real, mode)
        spline_filter1d(data.imag, order, axis, filtered.imag, mode)
        return filtered
    if order in (0, 1):
        # Orders 0 and 1 leave the sample values unchanged.
        filtered[...] = numpy.array(data)
    else:
        mode_code = _ni_support._extend_mode_to_code(mode)
        axis_index = normalize_axis_index(axis, data.ndim)
        _nd_image.spline_filter1d(data, order, axis_index, filtered,
                                  mode_code)
    return filtered
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
    """
    Multidimensional spline filter.

    For more details, see `spline_filter1d`.

    See Also
    --------
    spline_filter1d : Calculate a 1-D spline filter along the given axis.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D spline filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.

    For complex-valued `input`, this function processes the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    We can filter an image using multidimensional splines:

    >>> from scipy.ndimage import spline_filter
    >>> import matplotlib.pyplot as plt
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter = spline_filter(orig_img, order=3)
    >>> f, ax = plt.subplots(1, 2, sharex=True)
    >>> for ind, data in enumerate([[orig_img, "original image"],
    ...                             [sp_filter, "spline filter"]]):
    ...     ax[ind].imshow(data[0], cmap='gray_r')
    ...     ax[ind].set_title(data[1])
    >>> plt.tight_layout()
    >>> plt.show()
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    is_complex = numpy.iscomplexobj(data)
    filtered = _ni_support._get_output(output, data,
                                       complex_output=is_complex)
    if is_complex:
        # Recurse: real and imaginary components are filtered separately.
        spline_filter(data.real, order, filtered.real, mode)
        spline_filter(data.imag, order, filtered.imag, mode)
        return filtered
    if data.ndim > 0 and order not in (0, 1):
        # Apply the 1-D prefilter along each axis in turn; after the first
        # pass the partially-filtered output is filtered in place.
        for ax in range(data.ndim):
            spline_filter1d(data, order, ax, output=filtered, mode=mode)
            data = filtered
    else:
        filtered[...] = data[...]
    return filtered
def _prepad_for_spline_filter(input, mode, cval):
if mode in ['nearest', 'grid-constant']:
npad = 12
if mode == 'grid-constant':
padded = numpy.pad(input, npad, mode='constant',
constant_values=cval)
elif mode == 'nearest':
padded = numpy.pad(input, npad, mode='edge')
else:
# other modes have exact boundary conditions implemented so
# no prepadding is needed
npad = 0
padded = input
return padded, npad
@docfiller
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords={}):
    """
    Apply an arbitrary geometric transform.

    The given mapping function is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by spline interpolation of
    the requested order.

    Parameters
    ----------
    %(input)s
    mapping : {callable, scipy.LowLevelCallable}
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a tuple
        of length equal to the input array rank.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.

    Returns
    -------
    output : ndarray
        The filtered input.

    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d

    Notes
    -----
    This function also accepts low-level callback functions with one
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int mapping(npy_intp *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)
       int mapping(intptr_t *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)

    The calling function iterates over the elements of the output array,
    calling the callback function at each element. The coordinates of the
    current output element are passed through ``output_coordinates``. The
    callback function must return the coordinates at which the input must
    be interpolated in ``input_coordinates``. The rank of the input and
    output arrays are given by ``input_rank`` and ``output_rank``
    respectively. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the Python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.ndimage import geometric_transform
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
    array([[ 0.   ,  0.   ,  0.   ],
           [ 0.   ,  1.362,  2.738],
           [ 0.   ,  4.812,  6.187],
           [ 0.   ,  8.263,  9.637]])
    >>> b = [1, 2, 3, 4, 5]
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 3,)
    ...
    >>> geometric_transform(b, shift_func, mode='constant')
    array([0, 0, 0, 1, 2])
    >>> geometric_transform(b, shift_func, mode='nearest')
    array([1, 1, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='reflect')
    array([3, 2, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='wrap')
    array([2, 3, 4, 1, 2])
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if output_shape is None:
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Transform real and imaginary parts independently, writing into
        # the matching real/imag views of the complex output array.
        kwargs = dict(order=order, mode=mode, prefilter=prefilter,
                      output_shape=output_shape,
                      extra_arguments=extra_arguments,
                      extra_keywords=extra_keywords)
        geometric_transform(input.real, mapping, output=output.real,
                            cval=numpy.real(cval), **kwargs)
        geometric_transform(input.imag, mapping, output=output.imag,
                            cval=numpy.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Spline prefiltering; some modes need explicit border padding
        # first, and `npad` tells the C code to discount that border.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
                                  order, mode, cval, npad, extra_arguments,
                                  extra_keywords)
    return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the
    requested order.

    The shape of the output is derived from that of the coordinate
    array by dropping the first axis. The values of the array along
    the first axis are the coordinates in the input array at which the
    output value is found.

    Parameters
    ----------
    %(input)s
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    map_coordinates : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.

    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate

    Notes
    -----
    For complex-valued `input`, this function maps the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])

    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].

    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    coordinates = numpy.asarray(coordinates)
    if numpy.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    # The output drops the first (component) axis of the coordinate array.
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Map real and imaginary parts independently into the matching
        # views of the complex output.
        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
        map_coordinates(input.real, coordinates, output=output.real,
                        cval=numpy.real(cval), **kwargs)
        map_coordinates(input.imag, coordinates, output=output.imag,
                        cval=numpy.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Spline prefiltering; `npad` lets the C code discount the
        # explicitly padded border some modes require.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
                                  output, order, mode, cval, npad, None, None)
    return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.

    Given an output image pixel index vector ``o``, the pixel value
    is determined from the input image at position
    ``np.dot(matrix, o) + offset``.

    This does 'pull' (or 'backward') resampling, transforming the output space
    to the input to locate data. Affine transformations are often described in
    the 'push' (or 'forward') direction, transforming input to output. If you
    have a matrix for the 'push' transformation, use its inverse
    (:func:`numpy.linalg.inv`) in this function.

    Parameters
    ----------
    %(input)s
    matrix : ndarray
        The inverse coordinate transformation matrix, mapping output
        coordinates to input coordinates. If ``ndim`` is the number of
        dimensions of ``input``, the given matrix must have one of the
        following shapes:

            - ``(ndim, ndim)``: the linear transformation matrix for each
              output coordinate.
            - ``(ndim,)``: assume that the 2-D transformation matrix is
              diagonal, with the diagonal specified by the given value. A more
              efficient algorithm is then used that exploits the separability
              of the problem.
            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
              specified using homogeneous coordinates [1]_. In this case, any
              value passed to ``offset`` is ignored.
            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
              homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
              and may be omitted.

    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    affine_transform : ndarray
        The transformed input.

    Notes
    -----
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.

    .. versionchanged:: 0.18.0
        Previously, the exact interpretation of the affine transformation
        depended on whether the matrix was supplied as a 1-D or a
        2-D array. If a 1-D array was supplied
        to the matrix parameter, the output pixel value at index ``o``
        was determined from the input image at position
        ``matrix * (o + offset)``.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if output_shape is None:
        if isinstance(output, numpy.ndarray):
            output_shape = output.shape
        else:
            output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Transform real and imaginary parts independently into the
        # corresponding views of the complex output array.
        kwargs = dict(offset=offset, output_shape=output_shape, order=order,
                      mode=mode, prefilter=prefilter)
        affine_transform(input.real, matrix, output=output.real,
                         cval=numpy.real(cval), **kwargs)
        affine_transform(input.imag, matrix, output=output.imag,
                         cval=numpy.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Spline prefiltering; `npad` lets the C code discount the
        # explicitly padded border some modes require.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    matrix = numpy.asarray(matrix, dtype=numpy.float64)
    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
            (matrix.shape[0] in [input.ndim, input.ndim + 1])):
        if matrix.shape[0] == input.ndim + 1:
            # A full homogeneous matrix must have [0, ..., 0, 1] as its
            # bottom row for the affine interpretation to be valid.
            exptd = [0] * input.ndim + [1]
            if not numpy.all(matrix[input.ndim] == exptd):
                msg = ('Expected homogeneous transformation matrix with '
                       'shape %s for image shape %s, but bottom row was '
                       'not equal to %s' % (matrix.shape, input.shape, exptd))
                raise ValueError(msg)
        # assume input is homogeneous coordinate transformation matrix
        # (the last column becomes the offset; `offset` arg is ignored)
        offset = matrix[:input.ndim, input.ndim]
        matrix = matrix[:input.ndim, :input.ndim]
    if matrix.shape[0] != input.ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    if not matrix.flags.contiguous:
        # The C extension requires contiguous arrays.
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, input.ndim)
    offset = numpy.asarray(offset, dtype=numpy.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        warnings.warn(
            "The behavior of affine_transform with a 1-D "
            "array supplied for the matrix parameter has changed in "
            "SciPy 0.18.0."
        )
        # Diagonal matrix: use the faster separable zoom/shift path.
        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
                             mode, cval, npad, False)
    else:
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                                      output, order, mode, cval, npad, None,
                                      None)
    return output
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.

    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.

    Parameters
    ----------
    %(input)s
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    shift : ndarray
        The shifted input.

    Notes
    -----
    For complex-valued `input`, this function shifts the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    if complex_output:
        # import under different name to avoid confusion with shift parameter
        from scipy.ndimage.interpolation import shift as _shift
        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
        _shift(input.real, shift, output=output.real, cval=numpy.real(cval),
               **kwargs)
        _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
               **kwargs)
        return output
    if prefilter and order > 1:
        # Spline prefiltering; `npad` lets the C code discount the
        # explicitly padded border some modes require.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    shift = _ni_support._normalize_sequence(shift, input.ndim)
    # The C code maps output to input coordinates, hence the negation.
    shift = [-ii for ii in shift]
    shift = numpy.asarray(shift, dtype=numpy.float64)
    if not shift.flags.contiguous:
        shift = shift.copy()
    _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
                         npad, False)
    return output
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True, *, grid_mode=False):
    """
    Zoom an array.

    The array is zoomed using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    zoom : float or sequence
        The zoom factor along the axes. If a float, `zoom` is the same for each
        axis. If a sequence, `zoom` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    grid_mode : bool, optional
        If False, the distance from the pixel centers is zoomed. Otherwise, the
        distance including the full pixel extent is used. For example, a 1d
        signal of length 5 is considered to have length 4 when `grid_mode` is
        False, but length 5 when `grid_mode` is True. See the following
        visual illustration:

        .. code-block:: text

                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
                     |<-------------------------------------->|
                                        vs.
                |<----------------------------------------------->|

        The starting point of the arrow in the diagram above corresponds to
        coordinate location 0 in each mode.

    Returns
    -------
    zoom : ndarray
        The zoomed input.

    Notes
    -----
    For complex-valued `input`, this function zooms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.zoom(ascent, 3.0)
    >>> ax1.imshow(ascent, vmin=0, vmax=255)
    >>> ax2.imshow(result, vmin=0, vmax=255)
    >>> plt.show()

    >>> print(ascent.shape)
    (512, 512)

    >>> print(result.shape)
    (1536, 1536)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    output_shape = tuple(
        [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
    complex_output = numpy.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # import under different name to avoid confusion with zoom parameter
        from scipy.ndimage.interpolation import zoom as _zoom
        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
        _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
              **kwargs)
        _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
              **kwargs)
        return output
    if prefilter and order > 1:
        # Spline prefiltering; `npad` lets the C code discount the
        # explicitly padded border some modes require.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=numpy.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    if grid_mode:
        # warn about modes that may have surprising behavior
        suggest_mode = None
        if mode == 'constant':
            suggest_mode = 'grid-constant'
        elif mode == 'wrap':
            suggest_mode = 'grid-wrap'
        if suggest_mode is not None:
            warnings.warn(
                ("It is recommended to use mode = {} instead of {} when "
                 "grid_mode is True."
                 ).format(suggest_mode, mode)
            )
    mode = _ni_support._extend_mode_to_code(mode)
    # Recompute the effective per-axis zoom from the rounded output shape
    # so the sampling positions match the actual output grid.
    zoom_div = numpy.array(output_shape)
    zoom_nominator = numpy.array(input.shape)
    if not grid_mode:
        # Center-based zoom measures spans between pixel centers (n - 1).
        zoom_div -= 1
        zoom_nominator -= 1

    # Zooming to infinite values is unpredictable, so just choose
    # zoom factor 1 instead
    zoom = numpy.divide(zoom_nominator, zoom_div,
                        out=numpy.ones_like(input.shape, dtype=numpy.float64),
                        where=zoom_div != 0)
    zoom = numpy.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
                         grid_mode)
    return output
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """
    Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    `axes` parameter using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    angle : float
        The rotation angle in degrees.
    axes : tuple of 2 ints, optional
        The two axes that define the plane of rotation. Default is the first
        two axes.
    reshape : bool, optional
        If `reshape` is true, the output shape is adapted so that the input
        array is contained completely in the output. Default is True.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    rotate : ndarray
        The rotated input.

    Notes
    -----
    For complex-valued `input`, this function rotates the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure(figsize=(10, 3))
    >>> ax1, ax2, ax3 = fig.subplots(1, 3)
    >>> img = misc.ascent()
    >>> img_45 = ndimage.rotate(img, 45, reshape=False)
    >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
    >>> ax1.imshow(img, cmap='gray')
    >>> ax1.set_axis_off()
    >>> ax2.imshow(img_45, cmap='gray')
    >>> ax2.set_axis_off()
    >>> ax3.imshow(full_img_45, cmap='gray')
    >>> ax3.set_axis_off()
    >>> fig.set_tight_layout(True)
    >>> plt.show()
    >>> print(img.shape)
    (512, 512)
    >>> print(img_45.shape)
    (512, 512)
    >>> print(full_img_45.shape)
    (724, 724)
    """
    input_arr = numpy.asarray(input)
    ndim = input_arr.ndim
    if ndim < 2:
        raise ValueError('input array should be at least 2D')
    axes = list(axes)
    if len(axes) != 2:
        raise ValueError('axes should contain exactly two values')
    if not all([float(ax).is_integer() for ax in axes]):
        raise ValueError('axes should contain only integer values')
    # Normalize negative axis indices before validating them.
    if axes[0] < 0:
        axes[0] += ndim
    if axes[1] < 0:
        axes[1] += ndim
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
        raise ValueError('invalid rotation plane specified')
    axes.sort()
    # Degree-exact trig (cosdg/sindg) avoids rounding error at e.g. 90.
    c, s = special.cosdg(angle), special.sindg(angle)
    rot_matrix = numpy.array([[c, s],
                              [-s, c]])
    img_shape = numpy.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy],
                                   [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
    else:
        out_plane_shape = img_shape[axes]
    # Offset chosen so the rotation pivots about the plane center.
    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2
    offset = in_center - out_center
    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)
    complex_output = numpy.iscomplexobj(input_arr)
    output = _ni_support._get_output(output, input_arr, shape=output_shape,
                                     complex_output=complex_output)
    if ndim <= 2:
        affine_transform(input_arr, rot_matrix, offset, output_shape, output,
                         order, mode, cval, prefilter)
    else:
        # If ndim > 2, the rotation is applied over all the planes
        # parallel to axes
        planes_coord = itertools.product(
            *[[slice(None)] if ax in axes else range(img_shape[ax])
              for ax in range(ndim)])
        out_plane_shape = tuple(out_plane_shape)
        for coordinates in planes_coord:
            ia = input_arr[coordinates]
            oa = output[coordinates]
            affine_transform(ia, rot_matrix, offset, out_plane_shape,
                             oa, order, mode, cval, prefilter)
    return output
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/model_selection/tests/test_validation.py | 3 | 43270 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
# ``WindowsError`` only exists on Windows; define a ``None`` fallback so
# tests can reference the name unconditionally on other platforms.
try:
    WindowsError
except NameError:
    WindowsError = None
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier whose scores improve with more training data.

    Exercises the learning-curve machinery: as the fitted subset grows,
    the training score decays from 2 towards 1 while the test score
    climbs from 0 towards 1.
    """

    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        # Remember which array we trained on, and how large it was.
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        fraction = float(self.train_sizes) / self.n_max_train_sizes
        return 2. - fraction if self._is_training_data(X) else fraction

    def _is_training_data(self, X):
        # Identity (not equality): only the exact fitted array counts.
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that additionally supports ``partial_fit``."""

    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None

    def partial_fit(self, X, y=None, **params):
        # Accumulate the total number of samples seen and keep the first
        # sample of the latest batch as a training-data marker.
        self.train_sizes += X.shape[0]
        self.x = X[0]

    def _is_training_data(self, X):
        # A batch counts as training data if it contains the marker sample.
        return self.x in X
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier for validation-curve tests.

    ``score`` returns ``param`` on the training data and ``1 - param``
    elsewhere, so the resulting curve is fully determined by ``param``.
    """

    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        # Remember the fitted array and its size.
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        if self._is_training_data(X):
            return self.param
        return 1 - self.param

    def _is_training_data(self, X):
        # Identity (not equality): only the exact fitted array counts.
        return X is self.X_subset
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
    """Dummy classifier that disallows repeated calls of fit method"""

    def fit(self, X_subset, y_subset):
        # Fail loudly if fit is ever invoked a second time on this instance.
        assert_false(
            hasattr(self, 'fit_called_'),
            'fit is called the second time'
        )
        self.fit_called_ = True
        # BUG FIX: use the explicit class in super() instead of
        # ``super(type(self), self)`` -- the latter recurses infinitely if
        # this class is ever subclassed, because ``type(self)`` then
        # resolves to the subclass rather than this class.
        return super(MockEstimatorWithSingleFitCallAllowed, self).fit(
            X_subset, y_subset)

    def predict(self, X):
        raise NotImplementedError
class MockClassifier(object):
    """Dummy classifier to test the cross-validation.

    ``score`` depends only on ``a`` (larger ``|a|`` gives a lower score),
    which lets tests drive cross-validation scores deterministically.
    """

    def __init__(self, a=0, allow_nd=False):
        # a: controls the constant score; allow_nd: accept >2-D X in fit.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            # NOTE(review): message looks truncated ("X cannot be d" --
            # presumably "3d"); callers only assert on the exception type.
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): `y` here is the module-level fixture, not the
            # `Y` argument passed to fit -- looks unintentional; confirm
            # against the callers before changing.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # Degenerate prediction: echo the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Constant score in (0, 1]; maximal when a == 0.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
# 5x5 sparse identity, used as a dummy sparse extra fit parameter.
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
    """Smoke-test cross_val_score over dense, sparse, list and 3-D inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cross_val_score(clf, X, y2)
        assert_array_equal(scores, clf.score(X, y2))

        # test with multioutput y
        multioutput_y = np.column_stack([y2, y2[::-1]])
        scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(scores, clf.score(X_sparse, multioutput_y))

        scores = cross_val_score(clf, X_sparse, y2)
        assert_array_equal(scores, clf.score(X_sparse, y2))

        # test with multioutput y
        scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(scores, clf.score(X_sparse, multioutput_y))

    # test with X and y as list
    def list_check(x):
        return isinstance(x, list)

    clf = CheckingClassifier(check_X=list_check)
    scores = cross_val_score(clf, X.tolist(), y2.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cross_val_score(clf, X, y2.tolist())

    # unknown scoring string must raise
    assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")

    # test with 3d X: accepted only when the estimator allows n-d input
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cross_val_score(clf, X_3d, y2)

    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
    """Group-based CV splitters must refuse to run without `groups`."""
    # Check if ValueError (when groups is None) propagates to cross_val_score
    # and cross_val_predict, and that groups reaches the cv object.
    X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
    clf = SVC(kernel="linear")
    group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
                 GroupShuffleSplit()]
    for cv in group_cvs:
        for func in (cross_val_score, cross_val_predict):
            assert_raise_message(ValueError,
                                 "The 'groups' parameter should not be None.",
                                 func, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
    """cross_val_score must accept pandas (or pandas-like) containers
    without converting/destroying them before the estimator sees them."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score with boolean-mask CV splits must give the same
    scores as index-based splits."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    kfold = KFold(5)
    scores_indices = cross_val_score(svm, X, y, cv=kfold)
    kfold = KFold(5)
    cv_masks = []
    for train, test in kfold.split(X, y):
        # builtin ``bool`` instead of the removed ``np.bool`` alias
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: previously the *index* arrays (train, test) were appended,
        # so the boolean masks were built but never used and the test
        # compared index-based CV against itself.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """SVC with a precomputed kernel must score like the linear kernel, and
    non-square or non-array kernel inputs must be rejected."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cross_val_score(svm, X, y)
    assert_array_almost_equal(score_precomputed, score_linear)
    # test with callable
    svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
    score_callable = cross_val_score(svm, X, y)
    assert_array_almost_equal(score_precomputed, score_callable)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params (arrays, sparse matrices, and arbitrary non-array objects)
    must be forwarded — appropriately sliced — to the estimator's fit."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                          shape=(10, 1))
    P_sparse = coo_matrix(np.eye(5))
    # Sentinel non-array parameters: these must be passed through untouched.
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer must be called once per fold and its return value
    used as the fold score."""
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        # Record the arguments so we can count the calls afterwards.
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # default 3-fold CV -> the scorer must have been invoked three times
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """Passing an object that is not an estimator must raise TypeError."""
    class BrokenEstimator:
        # Deliberately provides no fit/score methods.
        pass

    broken = BrokenEstimator()
    assert_raises(TypeError, cross_val_score, broken, X)
def test_cross_val_score_with_score_func_classification():
    """The default classifier score, "accuracy", and "f1_weighted" must all
    agree on iris (classes are balanced)."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Regression scorers: default Ridge score == r2, neg MSE is negative,
    and explained variance matches r2 on this data."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cross_val_score(reg, X, y, cv=5,
                                     scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: significant score/p-value on real labels
    (dense and sparse input, trivial single-group case, custom scorer) and
    a non-significant result once the labels are randomized."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = StratifiedKFold(2)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # a single all-ones group must not change the result
    score_group, _, pvalue_group = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        groups=np.ones(y.size), random_state=0)
    assert_true(score_group == score)
    assert_true(pvalue_group == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = StratifiedKFold(2)
    score_group, _, pvalue_group = permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", groups=np.ones(y.size), random_state=0)
    assert_true(score_group == score)
    assert_true(pvalue_group == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # accuracy minus error rate, in [-1, 1]
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
                y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
    """permutation_test_score must accept NaN features when the pipeline
    starts with an imputer."""
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use integer division — np.repeat with a float repeat count
    # is deprecated and rejected by recent NumPy versions.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
    """cross_val_score must accept NaN features when the pipeline starts
    with an imputer."""
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use integer division — np.repeat with a float repeat count
    # is deprecated and rejected by recent NumPy versions.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
    """cross_val_score must work with multilabel targets and averaged
    precision scorers (micro / macro / samples)."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict on boston: must match a naive per-fold loop, keep
    the sample count for LOO / sparse input / unsupervised estimators, and
    reject a CV splitter yielding out-of-range test indices."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = KFold()
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = LeaveOneOut()
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cross_val_predict(est, Xsp, y)
    # BUG FIX: comparing two scalar lengths — use assert_equal like the
    # surrounding checks, not the array helper assert_array_almost_equal.
    assert_equal(len(preds), len(y))
    preds = cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    class BadCV():
        def split(self, X, y=None, groups=None):
            # test index 8 exceeds the 8-sample "dataset" implied by train,
            # so prediction assembly must fail with ValueError
            for i in range(4):
                yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
    """cross_val_predict output shapes for dense, sparse, multioutput,
    list, and 3d inputs."""
    iris = load_iris()
    X, y = iris.data, iris.target
    X_sparse = coo_matrix(X)
    multioutput_y = np.column_stack([y, y[::-1]])
    clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
    # Smoke test
    predictions = cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (150,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, multioutput_y)
    assert_equal(predictions.shape, (150, 2))
    predictions = cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (150,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, multioutput_y)
    assert_array_equal(predictions.shape, (150, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X: the checking estimator asserts the extra axis survives
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
    """cross_val_predict must accept pandas (or pandas-like) inputs without
    converting them before the estimator sees them."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series (module-level X / y2 fixtures)
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
    """Sparse matrices supplied via fit_params must be sliced per fold and
    forwarded to the estimator without breaking scoring."""
    iris = load_iris()
    data, target = iris.data, iris.target
    estimator = MockClassifier()
    sparse_weight = coo_matrix(np.eye(data.shape[0]))
    scores = cross_val_score(estimator, data, target,
                             fit_params={'sparse_sample_weight': sparse_weight})
    # MockClassifier always scores 1.0 on each of the 3 default folds.
    assert_array_equal(scores, np.ones(3))
def test_learning_curve():
    """learning_curve must produce the mock estimator's known score ramp,
    emit no warnings, and work with a one-shot (non-re-entrant) splitter."""
    n_samples = 30
    n_splits = 3
    X, y = make_classification(n_samples=n_samples, n_features=1,
                               n_informative=1, n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
    for shuffle_train in [False, True]:
        with warnings.catch_warnings(record=True) as w:
            train_sizes, train_scores, test_scores = learning_curve(
                estimator, X, y, cv=KFold(n_splits=n_splits),
                train_sizes=np.linspace(0.1, 1.0, 10),
                shuffle=shuffle_train)
        if len(w) > 0:
            raise RuntimeError("Unexpected warning: %r" % w[0].message)
        assert_equal(train_scores.shape, (10, 3))
        assert_equal(test_scores.shape, (10, 3))
        assert_array_equal(train_sizes, np.linspace(2, 20, 10))
        assert_array_almost_equal(train_scores.mean(axis=1),
                                  np.linspace(1.9, 1.0, 10))
        assert_array_almost_equal(test_scores.mean(axis=1),
                                  np.linspace(0.1, 1.0, 10))
        # Test a custom cv splitter that can iterate only once
        with warnings.catch_warnings(record=True) as w:
            train_sizes2, train_scores2, test_scores2 = learning_curve(
                estimator, X, y,
                cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
                train_sizes=np.linspace(0.1, 1.0, 10),
                shuffle=shuffle_train)
        if len(w) > 0:
            raise RuntimeError("Unexpected warning: %r" % w[0].message)
        assert_array_almost_equal(train_scores2, train_scores)
        assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
    """learning_curve must also work with y=None (unsupervised case)."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    """verbose=1 must print a "[learning_curve]" progress tag to stdout."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Capture stdout; the finally block restores it even if the call fails.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    """Requesting exploit_incremental_learning must raise a ValueError for
    estimators that do not implement partial_fit()."""
    features, labels = make_classification(n_samples=2, n_features=1,
                                           n_informative=1, n_redundant=0,
                                           n_classes=2,
                                           n_clusters_per_class=1,
                                           random_state=0)
    # MockImprovingEstimator has no partial_fit() method.
    mock = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, mock, features, labels,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Incremental (partial_fit based) learning_curve must reproduce the
    mock estimator's score ramp, with and without shuffling."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    for shuffle_train in [False, True]:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, exploit_incremental_learning=True,
            train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
        assert_array_equal(train_sizes, np.linspace(2, 20, 10))
        assert_array_almost_equal(train_scores.mean(axis=1),
                                  np.linspace(1.9, 1.0, 10))
        assert_array_almost_equal(test_scores.mean(axis=1),
                                  np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    """Incremental learning_curve must also work with y=None."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch and incremental learning_curve must agree for an estimator
    whose partial_fit matches its (single-iteration, unshuffled) fit."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(max_iter=1, tol=None,
                                            shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """learning_curve must reject train_sizes outside the valid range, both
    as fractions (of the training fold) and as absolute sample counts."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    invalid_train_sizes = ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21])
    for train_sizes in invalid_train_sizes:
        assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                      train_sizes=train_sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate train sizes (after rounding) must be dropped, with a
    RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    # linspace(0.33, 1.0, 3) on a 2-sample training fold collapses to [1, 2]
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    """learning_curve must work with an explicit KFold splitter object."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n_splits=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
    """shuffle=True must make learning_curve succeed on group splits that
    fail unshuffled, for both batch and incremental modes (see PR #7506)."""
    # Following test case was designed this way to verify the code
    # changes made in pull request: #7506.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
                  [17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
                  [15, 16], [17, 18]])
    y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
    groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
    # Splits on these groups fail without shuffle as the first iteration
    # of the learning curve doesn't contain label 4 in the training set.
    estimator = PassiveAggressiveClassifier(max_iter=5, tol=None,
                                            shuffle=False)
    cv = GroupKFold(n_splits=2)
    train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
        estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
        groups=groups, shuffle=True, random_state=2)
    assert_array_almost_equal(train_scores_batch.mean(axis=1),
                              np.array([0.75, 0.3, 0.36111111]))
    assert_array_almost_equal(test_scores_batch.mean(axis=1),
                              np.array([0.36111111, 0.25, 0.25]))
    # Without shuffling, the missing class in the first slice must raise.
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
                  train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
    train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
        estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
        groups=groups, shuffle=True, random_state=2,
        exploit_incremental_learning=True)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_validation_curve():
    """validation_curve must sweep the parameter range, warning-free, and
    return the mock estimator's known train/test score pattern."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_clone_estimator():
    """validation_curve must clone the estimator per parameter setting (the
    mock raises if fit is called twice on the same instance)."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(1, 0, 10)
    _, _ = validation_curve(
        MockEstimatorWithSingleFitCallAllowed(), X, y,
        param_name="param", param_range=param_range, cv=2
    )
def test_validation_curve_cv_splits_consistency():
    """validation_curve must use the same CV splits for every parameter
    setting, even for splitters that can only be iterated once."""
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=100, random_state=0)
    scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=OneTimeSplitter(n_splits=n_splits,
                                                  n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the
    # `split` is called for each parameter, the following should produce
    # identical results for param setting 1 and param setting 2 as both have
    # the same C value.
    assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
                                         2))
    scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits, shuffle=True))
    # For scores2, compare the 1st and 2nd parameter's scores
    # (Since the C value for 1st two param setting is 0.1, they must be
    # consistent unless the train test folds differ between the param settings)
    assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
                                         2))
    scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits))
    # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
    assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
    """_check_is_permutation must accept a shuffled arange and reject
    arrays with missing, repeated, or extra indices."""
    rng = np.random.RandomState(0)
    p = np.arange(100)
    rng.shuffle(p)
    assert_true(_check_is_permutation(p, 100))
    assert_false(_check_is_permutation(np.delete(p, 23), 100))
    # overwrite one entry -> some index now appears twice, another is missing
    p[0] = 23
    assert_false(_check_is_permutation(p, 100))
    # Check if the additional duplicate indices are caught
    assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
    """cross_val_predict must give identical results for dense and sparse
    (CSR) X / y on a multilabel problem."""
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
    """Shared checks: cross_val_predict with decision_function /
    predict_proba / predict_log_proba must match a naive per-fold loop and
    be invariant to relabelings of y (shifted and string labels)."""
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=0)
    classes = len(set(y))
    kfold = KFold()
    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        predictions = cross_val_predict(est, X, y, method=method)
        assert_equal(len(predictions), len(y))
        expected_predictions = np.zeros([len(y), classes])
        func = getattr(est, method)
        # Naive loop (should be same as cross_val_predict):
        for train, test in kfold.split(X, y):
            est.fit(X[train], y[train])
            expected_predictions[test] = func(X[test])
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold)
        assert_array_almost_equal(expected_predictions, predictions)
        # Test alternative representations of y
        predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
                                           cv=kfold)
        assert_array_equal(predictions, predictions_y1)
        predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
                                           cv=kfold)
        assert_array_equal(predictions, predictions_y2)
        predictions_ystr = cross_val_predict(est, X, y.astype('str'),
                                             method=method, cv=kfold)
        assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
    """Run the shared method-based checks on a plain LogisticRegression."""
    check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
    """Run the shared method-based checks on a GridSearchCV meta-estimator,
    which delegates decision_function/predict_proba to its best estimator."""
    est = GridSearchCV(LogisticRegression(random_state=42),
                       {'C': [0.1, 1]},
                       cv=2)
    check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
    """Naively compute out-of-fold predictions for ``est`` over ``cv``.

    For each split, fit on the train fold, call ``est.<method>`` on the test
    fold, and scatter the results into an (n_samples, classes) array at the
    columns given by ``est.classes_`` (so folds that only saw a subset of
    the classes still land in the right columns).
    """
    expected_predictions = np.zeros([len(y), classes])
    func = getattr(est, method)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        expected_predictions_ = func(X[test])
        # To avoid 2 dimensional indexing
        exp_pred_test = np.zeros((len(test), classes))
        # BUG FIX: compare strings with '==', not 'is'. Identity comparison
        # of str literals relies on interning and emits a SyntaxWarning on
        # CPython >= 3.8.
        if method == 'decision_function' and len(est.classes_) == 2:
            # binary decision_function returns one column for the positive
            # class only
            exp_pred_test[:, est.classes_[-1]] = expected_predictions_
        else:
            exp_pred_test[:, est.classes_] = expected_predictions_
        expected_predictions[test] = exp_pred_test
    return expected_predictions
def test_cross_val_predict_class_subset():
    """cross_val_predict must place per-fold outputs in the right class
    columns even when a fold's training set misses some classes, and must
    handle unordered/negative labels."""
    X = np.arange(8).reshape(4, 2)
    y = np.array([0, 0, 1, 2])
    classes = 3
    kfold3 = KFold(n_splits=3)
    kfold4 = KFold(n_splits=4)
    le = LabelEncoder()
    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        est = LogisticRegression()
        # Test with n_splits=3
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)
        # Runs a naive loop (should be same as cross_val_predict):
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
        # Test with n_splits=4
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold4)
        expected_predictions = get_expected_predictions(X, y, kfold4, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
        # Testing unordered labels
        y = [1, 1, -4, 6]
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)
        y = le.fit_transform(y)
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
    """A scalar np.memmap score must be accepted by cross_val_score, while
    a non-scalar memmap score must still raise a ValueError."""
    # Ensure a scalar score of memmap type is accepted
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    tf.write(b'Hello world!!!!!')
    tf.close()
    scores = np.memmap(tf.name, dtype=np.float64)
    score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
    try:
        cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
        # non-scalar should still fail
        assert_raises(ValueError, cross_val_score, clf, X, y,
                      scoring=lambda est, X, y: scores)
    finally:
        # Best effort to release the mmap file handles before deleting the
        # backing file under Windows
        scores, score = None, None
        for _ in range(3):
            try:
                os.unlink(tf.name)
                break
            except OSError:
                # BUG FIX: this previously caught 'WindowsError', a name
                # that only exists on Windows — referencing it on other
                # platforms raises NameError. WindowsError is an alias of
                # OSError on Windows, so catching OSError is the portable
                # equivalent.
                sleep(1.)
def test_permutation_test_score_pandas():
    """permutation_test_score must accept pandas (or pandas-like) inputs
    without converting them before the estimator sees them."""
    # check permutation_test_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        iris = load_iris()
        X, y = iris.data, iris.target
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        permutation_test_score(clf, X_df, y_ser)
| bsd-3-clause |
ud3sh/coursework | deeplearning.ai/coursera-improving-neural-networks/week2/Optimization_methods_v1b.py | 1 | 41625 |
# coding: utf-8
# # Optimization Methods
#
# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
#
# Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
# <img src="images/cost.jpg" style="width:650px;height:300px;">
# <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
#
# **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
#
# To get started, run the following code to import the libraries you will need.
# ### <font color='darkblue'> Updates to Assignment <font>
#
# #### If you were working on a previous version
# * The current notebook filename is version "Optimization_methods_v1b".
# * You can find your work in the file directory as version "Optimization methods'.
# * To see the file directory, click on the Coursera logo at the top left of the notebook.
#
# #### List of Updates
# * op_utils is now opt_utils_v1a. Assertion statement in `initialize_parameters` is fixed.
# * opt_utils_v1a: `compute_cost` function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).
# * In `model` function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.
# * Print statements used to check each function are reformatted, and 'expected output' is reformatted to match the format of the print statements (for easier visual comparisons).
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
# Configure inline plotting and matplotlib defaults for the notebook session.
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# ## 1 - Gradient Descent
#
# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
#
# **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[2]:
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Apply one batch gradient-descent step to every layer's weights and biases.

    Arguments:
    parameters -- python dictionary with entries 'W1', 'b1', ..., 'WL', 'bL'
    grads -- python dictionary with the matching gradients
             'dW1', 'db1', ..., 'dWL', 'dbL'
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- the dictionary with every entry updated as
                  W := W - learning_rate * dW and b := b - learning_rate * db
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    # Gradient-descent update, layer by layer (layers are 1-indexed).
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]

    return parameters
# In[3]:
# Sanity check: run one gradient-descent step on the fixture supplied by testCases
# and print the updated parameters for visual comparison with the expected output below.
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.63535156 -0.62320365 -0.53718766]
# [-1.07799357 0.85639907 -2.29470142]]
# b1 =
# [[ 1.74604067]
# [-0.75184921]]
# W2 =
# [[ 0.32171798 -0.25467393 1.46902454]
# [-2.05617317 -0.31554548 -0.3756023 ]
# [ 1.1404819 -1.09976462 -0.1612551 ]]
# b2 =
# [[-0.88020257]
# [ 0.02561572]
# [ 0.57539477]]
# ```
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost += compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost += compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
#
# <font color='blue'>
# **What you should remember**:
# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
# - You have to tune a learning rate hyperparameter $\alpha$.
# - With a well-tuned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
# ## 2 - Mini-Batch Gradient descent
#
# Let's learn how to build mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).
# In[4]:
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)
    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- random seed so the "random" shuffles are reproducible (for grading)
    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []
    # Step 1: Shuffle (X, Y) synchronously — column i of shuffled_X still matches column i of shuffled_Y.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1,m))
    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
    for k in range(0, num_complete_minibatches):
        ### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, mini_batch_size * k : mini_batch_size * (k + 1)]
        mini_batch_Y = shuffled_Y[:, mini_batch_size * k : mini_batch_size * (k + 1)]
        ### END CODE HERE ###
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    # Handling the end case (last mini-batch < mini_batch_size).
    # BUG FIX: the remainder starts right after the last complete mini-batch, i.e. at
    # num_complete_minibatches * mini_batch_size. The previous code reused the leftover
    # loop variable `k` (== num_complete_minibatches - 1), which made the final
    # mini-batch one full batch too large, and raised a NameError when
    # m < mini_batch_size (the loop above never ran, so `k` was unbound).
    if m % mini_batch_size != 0:
        ### START CODE HERE ### (approx. 2 lines)
        end = num_complete_minibatches * mini_batch_size
        mini_batch_X = shuffled_X[:, end : m]
        mini_batch_Y = shuffled_Y[:, end : m]
        ### END CODE HERE ###
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    return mini_batches
# In[5]:
# Sanity check: build mini-batches from the fixture supplied by testCases and print
# the shapes plus a few values for visual comparison with the expected output below.
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td > **shape of the 1st mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 2nd mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 3rd mini_batch_X** </td>
# <td > (12288, 20) </td>
# </tr>
# <tr>
# <td > **shape of the 1st mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 2nd mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 3rd mini_batch_Y** </td>
# <td > (1, 20) </td>
# </tr>
# <tr>
# <td > **mini batch sanity check** </td>
# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
# ## 3 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
#
# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# ```
# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
# In[8]:
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Create the zero-filled velocity dictionary for Momentum:
    - keys: "dW1", "db1", ..., "dWL", "dbL"
    - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    Returns:
    v -- python dictionary containing the current velocity.
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """
    # Each layer contributes one "W" and one "b" entry.
    num_layers = len(parameters) // 2
    v = {}
    for layer in range(1, num_layers + 1):
        suffix = str(layer)
        v["dW" + suffix] = np.zeros(parameters["W" + suffix].shape)
        v["db" + suffix] = np.zeros(parameters["b" + suffix].shape)
    return v
# In[9]:
# Sanity check: initialize the velocity for the testCases fixture and print it
# for visual comparison with the expected output below (all zeros).
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
# **Expected Output**:
#
# ```
# v["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db1"] =
# [[ 0.]
# [ 0.]]
# v["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# ```
# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
# In[10]:
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Apply one Momentum update to every parameter.
    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar
    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
    num_layers = len(parameters) // 2   # one (W, b) pair per layer
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        # Velocity: exponentially weighted average of past gradients.
        v["d" + w_key] = beta * v["d" + w_key] + (1 - beta) * grads["d" + w_key]
        v["d" + b_key] = beta * v["d" + b_key] + (1 - beta) * grads["d" + b_key]
        # Step in the direction of the velocity rather than the raw gradient.
        parameters[w_key] = parameters[w_key] - learning_rate * v["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * v["d" + b_key]
    return parameters, v
# In[11]:
# Sanity check: run one Momentum step on the testCases fixture and print the
# updated parameters and velocities for comparison with the expected output below.
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
# Fixed label typo: this line previously printed 'v["db2"] = v' (a stray "v"
# where the "\n" belongs, unlike every other print in this check).
print("v[\"db2\"] = \n" + str(v["db2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.62544598 -0.61290114 -0.52907334]
# [-1.07347112 0.86450677 -2.30085497]]
# b1 =
# [[ 1.74493465]
# [-0.76027113]]
# W2 =
# [[ 0.31930698 -0.24990073 1.4627996 ]
# [-2.05974396 -0.32173003 -0.38320915]
# [ 1.13444069 -1.0998786 -0.1713109 ]]
# b2 =
# [[-0.87809283]
# [ 0.04055394]
# [ 0.58207317]]
# v["dW1"] =
# [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]]
# v["db1"] =
# [[-0.01228902]
# [-0.09357694]]
# v["dW2"] =
# [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] =
# [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]
# ```
# **Note** that:
# - The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
# - If $\beta = 0$, then this just becomes standard gradient descent without momentum.
#
# **How do you choose $\beta$?**
#
# - The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
# - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
# - Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$.
# <font color='blue'>
# **What you should remember**:
# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
# - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
# ## 4 - Adam
#
# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
#
# **How does Adam work?**
# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
# 3. It updates parameters in a direction based on combining information from "1" and "2".
#
# The update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
# where:
# - t counts the number of steps taken of Adam
# - L is the number of layers
# - $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
# - $\alpha$ is the learning rate
# - $\varepsilon$ is a very small number to avoid dividing by zero
#
# As usual, we will store all parameters in the `parameters` dictionary
# **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
#
# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
# for $l = 1, ..., L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
#
# ```
# In[12]:
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Create the two zero-filled Adam accumulators:
    - keys: "dW1", "db1", ..., "dWL", "dbL"
    - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl
    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...
    """
    num_layers = len(parameters) // 2   # one (W, b) pair per layer
    v, s = {}, {}
    for layer in range(1, num_layers + 1):
        w_shape = parameters["W" + str(layer)].shape
        b_shape = parameters["b" + str(layer)].shape
        # First-moment (v) and second-moment (s) accumulators start at zero.
        v["dW" + str(layer)] = np.zeros(w_shape)
        v["db" + str(layer)] = np.zeros(b_shape)
        s["dW" + str(layer)] = np.zeros(w_shape)
        s["db" + str(layer)] = np.zeros(b_shape)
    return v, s
# In[13]:
# Sanity check: initialize the Adam accumulators for the testCases fixture and
# print them for visual comparison with the expected output below (all zeros).
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# **Expected Output**:
#
# ```
# v["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db1"] =
# [[ 0.]
# [ 0.]]
# v["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# s["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# s["db1"] =
# [[ 0.]
# [ 0.]]
# s["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# s["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# ```
# **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
# v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
# s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
# s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
# \end{cases}$$
#
#
# **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[18]:
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Apply one Adam update to every parameter.
    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam step counter (number of updates taken so far), used for bias correction
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    num_layers = len(parameters) // 2   # one (W, b) pair per layer
    v_corrected = {}                    # bias-corrected first moment estimates
    s_corrected = {}                    # bias-corrected second moment estimates
    # Bias-correction denominators depend only on t; compute them once.
    bias1 = 1 - beta1 ** t
    bias2 = 1 - beta2 ** t
    for layer in range(1, num_layers + 1):
        dW_key = "dW" + str(layer)
        db_key = "db" + str(layer)
        # Moving average of the gradients (first moment).
        v[dW_key] = beta1 * v[dW_key] + (1 - beta1) * grads[dW_key]
        v[db_key] = beta1 * v[db_key] + (1 - beta1) * grads[db_key]
        # Bias-corrected first moment.
        v_corrected[dW_key] = v[dW_key] / bias1
        v_corrected[db_key] = v[db_key] / bias1
        # Moving average of the squared gradients (second moment).
        s[dW_key] = beta2 * s[dW_key] + (1 - beta2) * (grads[dW_key] ** 2)
        s[db_key] = beta2 * s[db_key] + (1 - beta2) * (grads[db_key] ** 2)
        # Bias-corrected second moment.
        s_corrected[dW_key] = s[dW_key] / bias2
        s_corrected[db_key] = s[db_key] / bias2
        # Parameter step: scaled first moment over sqrt of second moment.
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * (
            v_corrected[dW_key] / (np.sqrt(s_corrected[dW_key]) + epsilon))
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * (
            v_corrected[db_key] / (np.sqrt(s_corrected[db_key]) + epsilon))
    return parameters, v, s
# In[19]:
# Sanity check: run one Adam step (t=2) on the testCases fixture and print the
# updated parameters and both moment accumulators for comparison with the
# expected output below.
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.63178673 -0.61919778 -0.53561312]
# [-1.08040999 0.85796626 -2.29409733]]
# b1 =
# [[ 1.75225313]
# [-0.75376553]]
# W2 =
# [[ 0.32648046 -0.25681174 1.46954931]
# [-2.05269934 -0.31497584 -0.37661299]
# [ 1.14121081 -1.09245036 -0.16498684]]
# b2 =
# [[-0.88529978]
# [ 0.03477238]
# [ 0.57537385]]
# v["dW1"] =
# [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]]
# v["db1"] =
# [[-0.01228902]
# [-0.09357694]]
# v["dW2"] =
# [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] =
# [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]
# s["dW1"] =
# [[ 0.00121136 0.00131039 0.00081287]
# [ 0.0002525 0.00081154 0.00046748]]
# s["db1"] =
# [[ 1.51020075e-05]
# [ 8.75664434e-04]]
# s["dW2"] =
# [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
# [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
# [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]
# s["db2"] =
# [[ 5.49507194e-05]
# [ 2.75494327e-03]
# [ 5.50629536e-04]]
# ```
# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
# ## 5 - Model with different optimization algorithms
#
# Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
# In[20]:
# Load the "moons" training set used to compare the optimizers below.
train_X, train_Y = load_dataset()
# We have already implemented a 3-layer neural network. You will train it with:
# - Mini-batch **Gradient Descent**: it will call your function:
# - `update_parameters_with_gd()`
# - Mini-batch **Momentum**: it will call your functions:
# - `initialize_velocity()` and `update_parameters_with_momentum()`
# - Mini-batch **Adam**: it will call your functions:
# - `initialize_adam()` and `update_parameters_with_adam()`
# In[22]:
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum" or "adam"; selects the parameter-update rule
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters

    Raises:
    ValueError -- if `optimizer` is not one of the three supported modes
    """
    costs = []      # to keep track of the cost (sampled every 100 epochs)
    t = 0           # initializing the counter required for Adam update
    seed = 10       # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]  # number of training examples

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state.  Rejecting unknown optimizers here avoids
    # silently running the whole loop without ever updating the parameters.
    if optimizer == "gd":
        pass  # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)
    else:
        raise ValueError("Unknown optimizer: %r (expected 'gd', 'momentum' or 'adam')" % (optimizer,))

    # Optimization loop
    for i in range(num_epochs):
        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0
        for minibatch in minibatches:
            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch
            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)
            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)
            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter (used for bias correction)
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                              t, learning_rate, beta1, beta2, epsilon)
        cost_avg = cost_total / m
        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
# You will now run this 3 layer neural network with each of the 3 optimization methods.
#
# ### 5.1 - Mini-batch Gradient descent
#
# Run the following code to see how the model does with mini-batch gradient descent.
# In[23]:
# train 3-layer model with plain mini-batch gradient descent
layers_dims = [train_X.shape[0], 5, 2, 1]  # input size -> hidden (5, 2) -> 1 output unit
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict on the training set
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary (axis limits chosen to frame the "moons" data)
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
# In[24]:
# train 3-layer model with mini-batch gradient descent plus momentum
layers_dims = [train_X.shape[0], 5, 2, 1]  # input size -> hidden (5, 2) -> 1 output unit
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict on the training set
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary (axis limits chosen to frame the "moons" data)
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.3 - Mini-batch with Adam mode
#
# Run the following code to see how the model does with Adam.
# In[25]:
# train 3-layer model with mini-batch Adam
layers_dims = [train_X.shape[0], 5, 2, 1]  # input size -> hidden (5, 2) -> 1 output unit
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict on the training set
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary (axis limits chosen to frame the "moons" data)
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.4 - Summary
#
# <table>
# <tr>
# <td>
# **optimization method**
# </td>
# <td>
# **accuracy**
# </td>
# <td>
# **cost shape**
# </td>
#
# </tr>
# <td>
# Gradient descent
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# <tr>
# <td>
# Momentum
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# 94%
# </td>
# <td>
# smoother
# </td>
# </tr>
# </table>
#
# Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
#
# Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
#
# Some advantages of Adam include:
# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
# - Usually works well even with little tuning of hyperparameters (except $\alpha$)
# **References**:
#
# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| unlicense |
chatcannon/numpy | numpy/core/code_generators/ufunc_docstrings.py | 2 | 93777 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
# Registry mapping "place.name" keys to docstring text; generate_umath.py
# reads this when emitting the C-level ufunc docstrings.
docdict = {}


def get(name):
    """Return the docstring registered under *name*, or None if absent."""
    try:
        return docdict[name]
    except KeyError:
        return None


def add_newdoc(place, name, doc):
    """Register *doc* in the dictionary under the key "<place>.<name>"."""
    key = '%s.%s' % (place, name)
    docdict[key] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
0 if x < 0
heaviside(x, h0) = h0 if x == 0
1 if x > 0
where `h0` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x : array_like
Input values.
h0 : array_like
The value of the function at x = 0.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : ndarray
The output array, element-wise Heaviside step function of `x`.
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
# Docstring for np.floor_divide. Fixes the divmod identity in the summary:
# the dividend satisfies a = a % b + b * (a // b), not b = ... (the original
# text had the left-hand side wrong).
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
remainder : Remainder complementary to floor_divide.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
# Docstring for np.invert. Fixes broken doctests in the Examples section:
# `uint8`/`uint16`/`int8` and `array` were used as bare names (undefined in
# a doctest namespace; they need the `np.` prefix), and `np.binary_repr(x, ...)`
# referenced an `x` that was never assigned — it is now bound to 13 up front,
# which matches the '00001101' outputs already shown. Also "A N-bit" -> "An N-bit".
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = 13
>>> np.invert(np.array([x], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(x, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([x], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(x, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
# Docstring for np.less_equal. Fixes the operator typo in the summary line:
# "=<" is not a Python operator; less_equal implements "<=".
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
# Docstring for np.fmax. Fixes the first doctest's expected output:
# integer inputs yield an integer array, array([2, 5, 4]), not floats —
# consistent with the parallel integer example in the `fmin` docstring.
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
# Internal-only ufunc docstring: `_ones_like` survives solely for backward
# compatibility inside numpy.core.umath; public code should use `ones_like`.
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
# Docstring for np.remainder. Fix: the summary was missing a space before
# the inline literal ("operator``x1 % x2``"), which breaks RST rendering.
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
Returns a scalar if both `x1` and `x2` are scalars.
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right `x2`. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
# Docstring for np.sqrt. Fix: the last example referenced the undefined
# name `numpy.inf`; the examples use the conventional `np` alias.
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the cube
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
# Docstring for np.frexp. Fixes: mismatched backticks around the
# ``x = mantissa * 2**exponent`` literal, and the grammar error
# "The mantissa is lies" / missing space in "interval(-1, 1)".
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
mfjb/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 22 | 45265 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)  # fixed seed so test data is reproducible
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Every scipy sparse container the estimators must accept, plus dense.
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
# Neighbor-search backends and Minkowski p values exercised by the tests.
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    # All neighbor-search algorithms must agree on k-neighbors queries,
    # and return_distance=False must yield the same indices as
    # return_distance=True.
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)

    for p in P:
        idx_only = []
        dist_idx = []
        for algorithm in ALGORITHMS:
            nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                            algorithm=algorithm,
                                            p=p)
            nn.fit(X)
            idx_only.append(nn.kneighbors(test, return_distance=False))
            dist_idx.append(nn.kneighbors(test, return_distance=True))

        # Compare each algorithm's output against the next one's.
        for k in range(len(dist_idx) - 1):
            assert_array_almost_equal(idx_only[k], dist_idx[k][1])
            assert_array_almost_equal(dist_idx[k][0], dist_idx[k + 1][0])
            assert_array_almost_equal(dist_idx[k][1], dist_idx[k + 1][1])
def test_unsupervised_inputs():
    # NearestNeighbors must accept a fitted estimator, a BallTree or a
    # KDTree as training input and give the same result as raw data.
    X = rng.random_sample((10, 3))

    fitted = neighbors.NearestNeighbors(n_neighbors=1)
    fitted.fit(X)
    dist_ref, ind_ref = fitted.kneighbors(X)

    model = neighbors.NearestNeighbors(n_neighbors=1)
    for training_input in (fitted, neighbors.BallTree(X),
                           neighbors.KDTree(X)):
        model.fit(training_input)
        dist, ind = model.kneighbors(X)
        assert_array_almost_equal(dist_ref, dist)
        assert_array_almost_equal(ind_ref, ind)
def test_precomputed(random_state=42):
    """Tests unsupervised NearestNeighbors with a distance matrix."""
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    # Precomputed distance matrices: train/train and test/train.
    DXX = metrics.pairwise_distances(X, metric='euclidean')
    DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
    for method in ['kneighbors']:
        # TODO: also test radius_neighbors, but requires different assertion
        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)
        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check X=None in prediction: neighbors of the training set itself.
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Must raise a ValueError if the matrix is not of correct shape
        assert_raises(ValueError, getattr(nbrs_D, method), X)
    # Supervised estimators must also accept precomputed distances.
    target = np.arange(X.shape[0])
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        print(Est)
        est = Est(metric='euclidean')
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = 'precomputed'
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
    # Cross-validation with metric='precomputed' must split the distance
    # matrix the same way the raw feature matrix is split.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    D = pairwise_distances(X, metric='euclidean')
    y = rng.randint(3, size=20)

    estimators = (neighbors.KNeighborsClassifier,
                  neighbors.RadiusNeighborsClassifier,
                  neighbors.KNeighborsRegressor,
                  neighbors.RadiusNeighborsRegressor)
    for Est in estimators:
        metric_score = cross_val_score(Est(), X, y)
        precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
        assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    # Test unsupervised radius-based query: all algorithms must agree, and
    # return_distance=False must give the same neighbor indices as
    # return_distance=True.
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            # BUG FIX: a stray trailing comma previously wrapped the first
            # assert call in a throwaway one-element tuple expression.
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    # k-neighbors classification: points perturbed by a tiny epsilon must
    # keep their original labels, for every algorithm and weighting, with
    # both integer and string targets.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            clf.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            pred = clf.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(pred, y[:n_test_pts])
            # String labels must behave exactly like integer labels.
            clf.fit(X, y_str)
            pred = clf.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    # Float-typed class labels must be accepted by KNeighborsClassifier
    # and produce the same predictions as their integer counterparts.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)

    clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X, y.astype(np.float))
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    pred = clf.predict(X[:n_test_pts] + epsilon)
    assert_array_equal(pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    # Test KNeighborsClassifier.predict_proba() method
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    # Expected probabilities over the sorted class set {1, 4, 5}, hand
    # computed from each sample's 3 nearest neighbors (uniform weights).
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    # Radius-based classification: slightly perturbed points must keep
    # their labels for every algorithm / weighting, int and str targets.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            assert_array_equal(clf.predict(X[:n_test_pts] + epsilon),
                               y[:n_test_pts])
            # Repeat the check with string-typed targets.
            clf.fit(X, y_str)
            assert_array_equal(clf.predict(X[:n_test_pts] + epsilon),
                               y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    # Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception when no
    # outlier_label is set, and assign outlier_label otherwise.
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    weight_func = _weight_func
    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    # BUG FIX: this branch was dead code (``elif False:``),
                    # so the outlier-labeling path was never asserted. The
                    # outlier query must receive the configured label.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    # Queries with no training point inside ``radius`` (outliers) must
    # receive the configured outlier_label instead of raising.
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1

    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    expected_z1 = np.array([1, 2])
    expected_z2 = np.array([1, -1])

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(expected_z1, clf.predict(z1))
            assert_array_equal(expected_z2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    """Classification is correct when a query coincides with a sample."""
    train_X = np.array([[1.0, 1.0], [2.0, 2.0]])
    train_y = np.array([1, 2])
    # The second query is an exact copy of a training sample (distance 0).
    queries = np.array([[1.01, 1.01], [2.0, 2.0]])
    expected = np.array([1, 2])

    for algorithm, weights in product(ALGORITHMS,
                                      ['uniform', 'distance', _weight_func]):
        clf = neighbors.RadiusNeighborsClassifier(radius=0.1,
                                                  weights=weights,
                                                  algorithm=algorithm)
        clf.fit(train_X, train_y)
        assert_array_equal(expected, clf.predict(queries))
def test_neighbors_regressors_zero_distance():
    """Regressors behave sanely when a query matches a sample exactly."""
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    z = np.array([[1.1, 1.1], [2.0, 2.0]])

    rnn_expected = np.array([1.25, 2.0])
    knn_expected = {'uniform': np.array([1.25, 1.0]),
                    'distance': np.array([1.25, 2.0])}

    # weights=_weight_func is deliberately not tested: users are expected
    # to handle zero distances themselves in a custom weight function.
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=0.2,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_expected, rnn.predict(z))

        for weights in ['uniform', 'distance']:
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(knn_expected[weights], knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    train = np.array([[1.5], [3.0], [3.01]])

    for algorithm in ALGORITHMS:
        model = neighbors.NearestNeighbors(radius=3.0, algorithm=algorithm)
        found = model.fit(train).radius_neighbors([0.0],
                                                  return_distance=False)
        # One object-dtype row per query point, never a dense 2d array.
        assert_equal(found.shape, (1,))
        assert_equal(found.dtype, object)
        # Sample at exactly 3.0 lies on the boundary and is included;
        # the one at 3.01 is not.
        assert_array_equal(found[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Radius-based classifier on multioutput data: predictions must match
    # the column-by-column single-output path.
    rng = check_random_state(0)
    n_features = 2
    n_samples = 40
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS,
                                      [None, 'uniform', 'distance',
                                       _weight_func]):
        # Fit one estimator per output column and stack the predictions.
        column_preds = []
        for col in range(n_output):
            est = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            est.fit(X_train, y_train[:, col])
            column_preds.append(est.predict(X_test))
        stacked = np.vstack(column_preds).T
        assert_equal(stacked.shape, y_test.shape)

        # The native multioutput path must agree with the stacked columns.
        est_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        est_mo.fit(X_train, y_train)
        pred_mo = est_mo.predict(X_test)
        assert_equal(pred_mo.shape, y_test.shape)
        assert_array_almost_equal(pred_mo, stacked)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    # Test k-NN classifier on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2
    # FIX: use the builtin `int` — the `np.int` alias is deprecated and was
    # removed in NumPy 1.24 (behavior is identical).
    y = ((X ** 2).sum(axis=1) < .5).astype(int)

    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        knn.fit(sparsemat(X), y)
        # Jitter the queries so they are not exact training points.
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_TYPES + (np.asarray,):
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            y_pred = knn.predict(X_eps)
            assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    # k-NN classifier on multioutput data: both predict and predict_proba
    # must match the column-by-column single-output path.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 50
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS,
                                      [None, 'uniform', 'distance',
                                       _weight_func]):
        # Fit one estimator per output column.
        column_preds = []
        column_probas = []
        for col in range(n_output):
            est = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            est.fit(X_train, y_train[:, col])
            column_preds.append(est.predict(X_test))
            column_probas.append(est.predict_proba(X_test))
        stacked = np.vstack(column_preds).T
        assert_equal(stacked.shape, y_test.shape)
        assert_equal(len(column_probas), n_output)

        # The native multioutput estimator must agree.
        est_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        est_mo.fit(X_train, y_train)
        pred_mo = est_mo.predict(X_test)
        assert_equal(pred_mo.shape, y_test.shape)
        assert_array_almost_equal(pred_mo, stacked)

        # Check proba: one probability array per output column.
        proba_mo = est_mo.predict_proba(X_test)
        assert_equal(len(proba_mo), n_output)
        for proba_a, proba_b in zip(proba_mo, column_probas):
            assert_array_almost_equal(proba_a, proba_b)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    """k-NN regression recovers a smooth target up to a 0.3 tolerance."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Target: normalized Euclidean norm of each sample.
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    expected = y[:n_test_pts]

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            reg.fit(X, y)
            # Jitter queries slightly so they are not exact training points.
            jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            prediction = reg.predict(X[:n_test_pts] + jitter)
            assert_true(np.all(abs(prediction - expected) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    # Multi-output k-NN regression with uniform weights: the prediction
    # is the plain mean of the neighbors' targets.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        reg = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        reg.fit(X_train, y_train)

        # Recompute the uniform-weight prediction by hand from the indices.
        neighbor_indices = reg.kneighbors(X_test, return_distance=False)
        manual = np.array([y_train[idx].mean(axis=0)
                           for idx in neighbor_indices])
        predicted = reg.predict(X_test)

        assert_equal(predicted.shape, y_test.shape)
        assert_equal(manual.shape, y_test.shape)
        assert_array_almost_equal(predicted, manual)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    """k-NN regression with a stacked two-column target."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    base = np.sqrt((X ** 2).sum(1))
    base /= base.max()
    # Duplicate the target into two identical output columns.
    y = np.vstack([base, base]).T
    expected = y[:n_test_pts]

    for algorithm, weights in product(ALGORITHMS,
                                      ['uniform', 'distance', _weight_func]):
        reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        reg.fit(X, y)
        jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        prediction = reg.predict(X[:n_test_pts] + jitter)
        assert_equal(prediction.shape, expected.shape)
        assert_true(np.all(np.abs(prediction - expected) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    """Radius-based regression recovers a smooth target within radius/2."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Target: normalized Euclidean norm of each sample.
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    expected = y[:n_test_pts]

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            reg = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            reg.fit(X, y)
            # Jitter queries slightly so they are not exact training points.
            jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            prediction = reg.predict(X[:n_test_pts] + jitter)
            assert_true(np.all(abs(prediction - expected) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    # Multi-output radius regression with uniform weights: the prediction
    # is the mean over all neighbors within the (default) radius.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        reg = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        reg.fit(X_train, y_train)

        # Recompute the uniform-weight prediction by hand from the indices.
        neighbor_indices = reg.radius_neighbors(X_test,
                                                return_distance=False)
        manual = np.array([y_train[idx].mean(axis=0)
                           for idx in neighbor_indices])
        predicted = reg.predict(X_test)

        assert_equal(manual.shape, y_test.shape)
        assert_equal(predicted.shape, y_test.shape)
        assert_array_almost_equal(predicted, manual)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    """Radius-based regression with a two-column target, all weightings."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    base = np.sqrt((X ** 2).sum(1))
    base /= base.max()
    # Duplicate the target into two identical output columns.
    y = np.vstack([base, base]).T
    expected = y[:n_test_pts]

    for algorithm, weights in product(ALGORITHMS,
                                      ['uniform', 'distance', _weight_func]):
        # NOTE(review): n_neighbors is forwarded to a radius-based
        # estimator — presumably absorbed by its **kwargs; confirm this is
        # intentional and not a copy-paste from the k-NN variant.
        reg = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        reg.fit(X, y)
        jitter = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        prediction = reg.predict(X[:n_test_pts] + jitter)
        assert_equal(prediction.shape, expected.shape)
        assert_true(np.all(np.abs(prediction - expected) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    # Test radius-based regression on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # FIX: use the builtin `int` — the `np.int` alias is deprecated and was
    # removed in NumPy 1.24 (behavior is identical).
    y = ((X ** 2).sum(axis=1) < .25).astype(int)

    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            algorithm='auto')
        knn.fit(sparsemat(X), y)
        for sparsev in SPARSE_OR_DENSE:
            X2 = sparsev(X)
            # Rounded regression output must recover the binary labels.
            assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.
    for algorithm in ALGORITHMS:
        # 1-NN reproduces the training labels exactly.
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_array_equal(clf.predict(iris.data), iris.target)

        # With 9 neighbors the training accuracy stays above 95%.
        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        accuracy = np.mean(clf.predict(iris.data) == iris.target)
        assert_true(accuracy > 0.95)

        # Regression on the integer labels rounds back to the right class.
        rgs = neighbors.KNeighborsRegressor(n_neighbors=5,
                                            algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        rounded = rgs.predict(iris.data).round()
        assert_true(np.mean(rounded == iris.target) > 0.95)
def test_neighbors_digits():
    # Sanity check on the digits dataset
    # the 'brute' algorithm has been observed to fail if the input
    # dtype is uint8 due to overflow in distance calculations.
    X = digits.data.astype('uint8')
    Y = digits.target
    n_samples = X.shape[0]

    # 80/20 contiguous train/test split.
    split = int(n_samples * 0.8)
    train_idx = np.arange(0, split)
    test_idx = np.arange(split, n_samples)
    X_train, Y_train = X[train_idx], Y[train_idx]
    X_test, Y_test = X[test_idx], Y[test_idx]

    # uint8 and float inputs must give the same score.
    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # n_neighbors = 1: connectivity reduces to the identity matrix.
    graph = neighbors.kneighbors_graph(X, 1, mode='connectivity')
    assert_array_equal(graph.toarray(), np.eye(graph.shape[0]))

    graph = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        graph.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])

    # n_neighbors = 2
    graph = neighbors.kneighbors_graph(X, 2, mode='connectivity')
    assert_array_equal(
        graph.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])

    graph = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        graph.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])

    # n_neighbors = 3: the graph becomes fully connected.
    graph = neighbors.kneighbors_graph(X, 3, mode='connectivity')
    assert_array_almost_equal(
        graph.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    # kneighbors_graph must give identical results for dense and CSR input.
    rng = np.random.RandomState(seed)
    dense = rng.randn(10, 10)
    sparse = csr_matrix(dense)
    for k, mode in product([1, 2, 3], ["connectivity", "distance"]):
        from_dense = neighbors.kneighbors_graph(dense, k, mode=mode)
        from_sparse = neighbors.kneighbors_graph(sparse, k, mode=mode)
        assert_array_almost_equal(from_dense.toarray(),
                                  from_sparse.toarray())
def test_radius_neighbors_graph():
    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # Connectivity mode: 0/1 adjacency within radius 1.5.
    graph = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
    assert_array_equal(
        graph.toarray(),
        [[1., 1., 0.],
         [1., 1., 1.],
         [0., 1., 1.]])

    # Distance mode: same sparsity pattern with actual distances as values.
    graph = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    assert_array_almost_equal(
        graph.toarray(),
        [[0., 1.01, 0.],
         [1.01, 0., 1.40716026],
         [0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
    # radius_neighbors_graph must agree between dense and CSR input.
    rng = np.random.RandomState(seed)
    dense = rng.randn(10, 10)
    sparse = csr_matrix(dense)
    # Note: the loop values are radii (second positional argument of
    # radius_neighbors_graph), not neighbor counts.
    for radius, mode in product([1, 2, 3], ["connectivity", "distance"]):
        from_dense = neighbors.radius_neighbors_graph(dense, radius,
                                                      mode=mode)
        from_sparse = neighbors.radius_neighbors_graph(sparse, radius,
                                                       mode=mode)
        assert_array_almost_equal(from_dense.toarray(),
                                  from_sparse.toarray())
def test_neighbors_badargs():
    # Test bad argument values: these should all raise ValueErrors
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')

    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)

    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        # Invalid constructor arguments.
        assert_raises(ValueError, cls, weights='blah')
        assert_raises(ValueError, cls, p=-1)
        assert_raises(ValueError, cls, algorithm='blah')

        # haversine + ball_tree rejects both dense prediction on 2-column
        # data and sparse fit input.
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError, nbrs.predict, X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)

        # Degenerate fit/predict inputs.
        nbrs = cls()
        assert_raises(ValueError, nbrs.fit, np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError, nbrs.fit, X[:, :, None], y)
        nbrs.fit(X, y)
        assert_raises(ValueError, nbrs.predict, [])

        # BUG FIX: `cls` is a class object, so the original
        # `isinstance(cls, ...)` tests were always False and this branch
        # never executed; use issubclass so a negative n_neighbors is
        # actually exercised for the k-NN estimators.
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
            nbrs = cls(n_neighbors=-1)
            assert_raises(ValueError, nbrs.fit, X, y)

    nbrs = neighbors.NearestNeighbors().fit(X)

    assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
    assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    # Test computing the neighbors for various metrics.
    # Create a symmetric positive (semi-)definite matrix for mahalanobis.
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)

    # Each entry: (metric name, extra keyword arguments for that metric).
    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)

    test = rng.rand(n_query_pts, n_features)

    for metric, metric_params in metrics:
        results = []
        # pop() removes `p` from metric_params so it is only passed via the
        # estimator's own `p` argument, never duplicated in metric_params.
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                # Unsupported metric must be rejected at construction time.
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue

            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results.append(neigh.kneighbors(test, return_distance=True))

        # All algorithms that ran must agree on distances and on indices.
        assert_array_almost_equal(results[0][0], results[1][0])
        assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
    """A user-supplied callable metric gives the same distances for
    'auto' and 'brute'."""
    def custom_metric(x1, x2):
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))

    X = np.random.RandomState(42).rand(20, 2)
    nbrs_auto = neighbors.NearestNeighbors(3, algorithm='auto',
                                           metric=custom_metric)
    nbrs_brute = neighbors.NearestNeighbors(3, algorithm='brute',
                                            metric=custom_metric)

    dist_auto, _ = nbrs_auto.fit(X).kneighbors(X)
    dist_brute, _ = nbrs_brute.fit(X).kneighbors(X)

    assert_array_almost_equal(dist_auto, dist_brute)
def test_metric_params_interface():
    # Passing metric keyword arguments (here `w`) directly to the estimator
    # constructor triggers a DeprecationWarning.
    assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
                 metric='wminkowski', w=np.ones(10))
    # Supplying `p` inside metric_params triggers a SyntaxWarning —
    # presumably because it duplicates the estimator's own `p` parameter;
    # confirm against the estimator's validation code.
    assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
                 metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
    # Tree-based estimators must refuse sparse input at predict time.
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    y = rng.randint(0, 2, 5)
    estimators = [
        neighbors.KNeighborsClassifier(1, algorithm='kd_tree'),
        neighbors.KNeighborsRegressor(1, algorithm='ball_tree'),
    ]
    for estimator in estimators:
        estimator.fit(X, y)
        assert_raises(ValueError, estimator.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # Find a reasonable radius.
    dist_array = pairwise_distances(X).flatten()
    # BUG FIX: the original called np.sort(dist_array) and discarded the
    # returned sorted copy, so `radius` was taken from the UNSORTED
    # distances. Sort in place before indexing.
    dist_array.sort()
    radius = dist_array[15]

    # Test kneighbors_graph: helper function vs. fitted estimator.
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())

    # Test radius_neighbors_graph the same way.
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric,
                                           radius=radius).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)

    # Raise error when wrong parameters are supplied: the graph helpers
    # reject an estimator that was fitted with a different metric.
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Assert each element of `nparray` equals the matching list entry."""
    for position, element in enumerate(nparray):
        assert_array_equal(element, list_check[position])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et.al when query is not training data
    for algorithm in ALGORITHMS:
        model = neighbors.NearestNeighbors(n_neighbors=1,
                                           algorithm=algorithm)
        model.fit([[0], [1]])
        queries = [[2], [1]]

        # Nearest single neighbor of each query point.
        dist, ind = model.kneighbors(queries)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])

        # Neighbors within radius 1.5: variable-length object arrays.
        dist, ind = model.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])

        # Graph variants must agree with the raw queries above.
        assert_array_equal(
            model.kneighbors_graph(queries).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            model.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        radius_graph = model.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(radius_graph.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et.al when query is None
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)

        X = [[0], [1]]
        nn.fit(X)

        # With no query, each sample's own row is excluded.
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        # BUG FIX: the loop body previously asserted on `rng` only, so the
        # kneighbors graph was never checked; assert on the loop variable.
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])

        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    # Test behavior of kneighbors when duplicates are present in query
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])

        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        # Zero self-distances must still be stored explicitly in the
        # sparse structure (data/indices), not dropped.
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])

        # Radius queries return both samples for both query points.
        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])

        # Mask the first duplicates when n_duplicates > n_neighbors.
        X = np.ones((3, 1))
        nn = neighbors.NearestNeighbors(n_neighbors=1)
        nn.fit(X)
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, np.zeros((3, 1)))
        assert_array_equal(ind, [[1], [0], [1]])

        # Test that zeros are explicitly marked in kneighbors_graph.
        kng = nn.kneighbors_graph(mode='distance')
        assert_array_equal(
            kng.A, np.zeros((3, 3)))
        assert_array_equal(kng.data, np.zeros(3))
        assert_array_equal(kng.indices, [1., 0., 1.])
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    # Test include_self parameter in neighbors_graph
    X = [[2, 3], [4, 5]]

    # k-NN graph: with include_self each point is its own neighbor.
    with_self = neighbors.kneighbors_graph(X, 1, include_self=True).A
    without_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(with_self, [[1., 0.], [0., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])

    # Radius graph: same contract for the radius-based variant.
    with_self = neighbors.radius_neighbors_graph(X, 5.0,
                                                 include_self=True).A
    without_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(with_self, [[1., 1.], [1., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
    # Results must not depend on the value of n_jobs.
    X, y = datasets.make_classification(n_samples=10, n_features=2,
                                        n_redundant=0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                             algorithm=algorithm)
        clf.fit(X_train, y_train)

        # Reference results with the default (sequential) n_jobs.
        y_ref = clf.predict(X_test)
        dist_ref, ind_ref = clf.kneighbors(X_test)
        graph_ref = clf.kneighbors_graph(X_test, mode='distance').toarray()

        for n_jobs in [-1, 2, 5]:
            clf.set_params(n_jobs=n_jobs)
            assert_array_equal(y_ref, clf.predict(X_test))
            dist, ind = clf.kneighbors(X_test)
            assert_array_almost_equal(dist_ref, dist)
            assert_array_equal(ind_ref, ind)
            graph = clf.kneighbors_graph(X_test,
                                         mode='distance').toarray()
            assert_array_almost_equal(graph_ref, graph)
def test_dtype_convert():
    # String labels must survive a fit/predict round-trip unchanged.
    n_classes = 15
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])

    classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
    assert_array_equal(classifier.fit(X, y).predict(X), y)
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/datasets/samples_generator.py | 2 | 40584 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numpy as np
from scipy import linalg
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined in order to add covariance. The clusters
        are then placed on the vertices of the hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=2)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float or None, optional (default=0.0)
        Shift all features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float or None, optional (default=1.0)
        Multiply all features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset. This implementation is Python 2 only (it uses
    `xrange` and shuffles the list returned by `range`).

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total features")

    # Need enough hypercube vertices to host one cluster each.
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         "be smaller or equal 2 ** n_informative")

    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Infer the last class weight when only n_classes - 1 are given.
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by class weight; leftover samples
    # from integer truncation are spread round-robin over the clusters.
    n_samples_per_cluster = []

    for k in xrange(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))

    for i in xrange(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples)

    # Build the polytope: all 2**n_informative vertices at +/- class_sep.
    C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))

    if not hypercube:
        # Randomly rescale rows and columns to deform the hypercube
        # into a random polytope.
        for k in xrange(n_clusters):
            C[k, :] *= generator.rand()

        for f in xrange(n_informative):
            C[:, f] *= generator.rand()

    generator.shuffle(C)

    # Loop over all clusters
    pos = 0
    pos_end = 0

    for k in xrange(n_clusters):
        # Number of samples in cluster k
        n_samples_k = n_samples_per_cluster[k]

        # Define the range of samples
        pos = pos_end
        pos_end = pos + n_samples_k

        # Assign labels: clusters are assigned to classes round-robin.
        y[pos:pos_end] = k % n_classes

        # Draw features at random
        X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
                                                         n_informative)

        # Multiply by a random matrix to create co-variance of the features
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
                                                A)

        # Shift the cluster to a vertex of the polytope.
        X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))

    # Create redundant features as random linear combinations of the
    # informative ones.
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features, drawn at random from the informative and
    # redundant columns.
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features with pure noise.
    X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly flip labels.
    # NOTE(review): the guard is `>= 0.0`, so the per-sample draw happens
    # even when flip_y == 0 (no labels flipped, but RNG state advances) —
    # presumably intentional for reproducibility; confirm.
    if flip_y >= 0.0:
        for i in xrange(n_samples):
            if generator.rand() < flip_y:
                y[i] = generator.randint(n_classes)

    # Randomly shift and scale each feature (per-feature random values
    # only when shift/scale were passed as None).
    constant_shift = shift is not None
    constant_scale = scale is not None

    for f in xrange(n_features):
        if not constant_shift:
            shift = (2 * generator.rand() - 1) * class_sep

        if not constant_scale:
            scale = 1 + 100 * generator.rand()

        X[:, f] += shift
        X[:, f] *= scale

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        # Python 2: range() returns a list, which shuffle() can permute.
        indices = range(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50,
                                   allow_unlabeled=True, random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. Number of labels follows
        a Poisson distribution that never takes the value 0.

    length : int, optional (default=50)
        Sum of the features (number of words if documents).

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    Y : list of tuples
        The label sets.
    """
    generator = check_random_state(random_state)
    # Class prior P(c), normalized to sum to one.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    # Per-class word distributions P(w | c); each column sums to one.
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        n = n_classes + 1
        while (not allow_unlabeled and n == 0) or n > n_classes:
            n = generator.poisson(n_labels)

        # pick n distinct classes
        y = []
        while len(y) != n:
            # pick a class with probability P(c)
            c = generator.multinomial(1, p_c).argmax()
            # IDIOM FIX: `c not in y` instead of `not c in y`
            # (identical semantics, PEP 8 style).
            if c not in y:
                y.append(c)

        # pick a non-zero document length by rejection sampling
        k = 0
        while k == 0:
            k = generator.poisson(length)

        # generate a document of length k words
        x = np.zeros(n_features, dtype=int)
        for i in range(k):
            if len(y) == 0:
                # if sample does not belong to any class, generate noise word
                w = generator.randint(n_features)
            else:
                # pick a class and generate an appropriate word
                c = y[generator.randint(len(y))]
                w = generator.multinomial(1, p_w_c[:, c]).argmax()
            x[w] += 1

        return x, y

    X, Y = zip(*[sample_example() for i in range(n_samples)])

    return np.array(X, dtype=np.float64), Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    **References**:

    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.
    """
    rs = check_random_state(random_state)

    shape = (n_samples, 10)
    # rs.normal already returns an array of the requested shape; the old
    # trailing .reshape(shape) was a no-op.
    X = rs.normal(size=shape)
    # 9.34 is the median of a chi-squared variable with 10 degrees of
    # freedom, so the two classes are balanced in expectation.
    y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
    y[y == 0.0] = -1.0

    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See the `make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.randn(n_samples, n_features)
    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
                                                           n_targets)

    y = np.dot(X, ground_truth) + bias

    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        # Use an ndarray: a Python 3 ``range`` object cannot be shuffled
        # in place by ``RandomState.shuffle``.
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        ground_truth = ground_truth[indices]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(ground_truth)

    else:
        return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 for the outer circle, 1 for the inner one).
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    n_samples_out = int(n_samples / float(1 + factor))
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)
    # so as not to have the first point = last point, we add one and then
    # remove it.
    n_samples_out, n_samples_in = n_samples_out + 1, n_samples_in + 1

    outer_circ_x = np.cos(np.linspace(0, 2 * np.pi, n_samples_out)[:-1])
    outer_circ_y = np.sin(np.linspace(0, 2 * np.pi, n_samples_out)[:-1])
    inner_circ_x = (np.cos(np.linspace(0, 2 * np.pi, n_samples_in)[:-1])
                    * factor)
    inner_circ_y = (np.sin(np.linspace(0, 2 * np.pi, n_samples_in)[:-1])
                    * factor)

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out - 1), np.ones(n_samples_in - 1)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    # ``np.int`` was an alias for the builtin ``int`` and has been removed
    # from recent NumPy releases; ``int`` yields the identical dtype.
    return X, y.astype(int)
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 for the outer moon, 1 for the inner one).
    """
    # Integer division: plain ``/`` yields a float under Python 3, which
    # np.linspace rejects as a sample count.
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # The n_samples_out outer points are stacked first, so they get label 0
    # and the n_samples_in inner points get label 1. (The counts were
    # previously swapped, mislabeling one point whenever n_samples is odd.)
    y = np.hstack([np.zeros(n_samples_out), np.ones(n_samples_in)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y.astype(int)
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)

    if isinstance(centers, (int, np.integer)):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        # ``array2d`` no longer exists; ``check_array`` is the validation
        # helper imported at the top of this module.
        centers = check_array(centers)
        n_features = centers.shape[1]

    X = []
    y = []

    n_centers = centers.shape[0]
    # Distribute n_samples as evenly as possible among the centers, handing
    # the remainder out one sample at a time to the first clusters.
    n_samples_per_center = [int(n_samples // n_centers)] * n_centers

    for i in range(n_samples % n_centers):
        n_samples_per_center[i] += 1

    for i, n in enumerate(n_samples_per_center):
        X.append(centers[i] + generator.normal(scale=cluster_std,
                                               size=(n, n_features)))
        y += [i] * n

    X = np.concatenate(X)
    y = np.array(y)

    if shuffle:
        indices = np.arange(n_samples)
        generator.shuffle(indices)
        X = X[indices]
        y = y[indices]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first 5 of the `n_features` features contribute to `y`; the
    rest are pure noise, so at least 5 features are required.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features. Should be at least 5.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)

    # Accumulate the Friedman #1 target term by term, in the same order as
    # the formula above, then perturb with gaussian noise.
    y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    y = y + 20 * (X[:, 2] - 0.5) ** 2
    y = y + 10 * X[:, 3]
    y = y + 5 * X[:, 4]
    y = y + noise * rng.randn(n_samples)

    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)

    # Draw uniform [0, 1) features, then rescale each column onto the
    # documented intervals.
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    x0, x1, x2, x3 = X[:, 0], X[:, 1], X[:, 2], X[:, 3]
    y = (x0 ** 2 + (x1 * x2 - 1 / (x1 * x3)) ** 2) ** 0.5
    y = y + noise * rng.randn(n_samples)

    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)

    # Draw uniform [0, 1) features, then rescale each column onto the
    # documented intervals.
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    x0, x1, x2, x3 = X[:, 0], X[:, 1], X[:, 2], X[:, 3]
    y = np.arctan((x1 * x2 - 1 / (x1 * x3)) / x0)
    y = y + noise * rng.randn(n_samples)

    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:
     - gray level pictures of faces
     - TF-IDF vectors of text documents crawled from the web

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    rng = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random orthonormal bases for the row and column spaces
    from ..utils.fixes import qr_economic
    u, _ = qr_economic(rng.randn(n_samples, n))
    v, _ = qr_economic(rng.randn(n_features, n))

    # Position index of each singular value
    index = np.arange(n, dtype=np.float64)

    # Assemble the singular profile: bell-shaped signal plus fat noise tail
    signal = ((1 - tail_strength)
              * np.exp(-1.0 * (index / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * index / effective_rank)
    s = np.identity(n) * (signal + tail)

    return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    n_components : int,
        number of components in the dictionary

    n_features : int
        number of features of the dataset to generate

    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample

    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).

    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).

    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # generate dictionary with unit-norm columns
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # generate code: each column gets n_nonzero_coefs gaussian coefficients
    # at randomly chosen component indices.
    # ``range`` works under both Python 2 and 3; ``xrange`` is Python-2-only.
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)

    # Standard-normal design matrix
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))

    # Per-sample mean of the target: a fixed sparse linear combination of
    # the first four columns; unit-variance gaussian noise around it.
    target_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=target_mean, scale=np.ones(n_samples))

    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.
    """
    rng = check_random_state(random_state)

    # Factor a random symmetric PSD matrix, then rebuild it with its
    # diagonal spectrum replaced by values in (1, 2) so the result is
    # strictly positive definite.
    A = rng.rand(n_dim, n_dim)
    U, _, Vt = linalg.svd(np.dot(A.T, A))
    spectrum = 1.0 + np.diag(rng.rand(n_dim))

    return np.dot(np.dot(U, spectrum), Vt)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger
        values enforce more sparsity.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output so that the diagonal elements
        are all 1.

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    prec : array of shape = [dim, dim]
        The generated sparse symmetric positive definite matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.
    """
    random_state = check_random_state(random_state)

    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Zero out entries with probability alpha; rescale the survivors into
    # [smallest_coef, largest_coef].
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    aux = np.tril(aux, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    prec = np.dot(chol.T, chol)

    if norm_diag:
        d = np.diag(prec)
        d = 1. / np.sqrt(d)

        prec *= d
        prec *= d[:, np.newaxis]

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perpsective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)

    # Angular parameter along the roll, uniform on [1.5*pi, 4.5*pi]
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    x = t * np.cos(t)
    y = 21 * rng.rand(1, n_samples)
    z = t * np.sin(t)

    X = np.concatenate((x, y, z))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)

    # Curve parameter, uniform on [-1.5*pi, 1.5*pi]
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    x = np.sin(t)
    y = 2.0 * rng.rand(1, n_samples)
    z = np.sign(t) * (np.cos(t) - 1)

    X = np.concatenate((x, y, z))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
| agpl-3.0 |
adamgreenhall/scikit-learn | sklearn/grid_search.py | 32 | 36586 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.product can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.prod: ``np.product`` is a deprecated alias removed in
            # NumPy 2.0.
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.

    n_iter : integer
        Number of parameter settings that are produced.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """

    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def __iter__(self):
        rng = check_random_state(self.random_state)
        # Sorted for reproducibility regardless of dict ordering.
        items = sorted(self.param_distributions.items())

        # Sampling without replacement is only possible when every
        # dimension is a finite list rather than a distribution object.
        if all(not hasattr(v, "rvs") for _, v in items):
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)

            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rng):
                yield param_grid[i]
        else:
            for _ in six.moves.range(self.n_iter):
                yield dict(
                    (k, v.rvs() if hasattr(v, "rvs")
                     else v[rng.randint(len(v))])
                    for k, v in items)

    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.

    y : array-like or None
        Targets for input data.

    estimator : estimator object
        This estimator will be cloned and then fitted.

    parameters : dict
        Parameters to be set on estimator for this grid point.

    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.

    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.

    scorer : callable or None.
        If provided must be a scorer callable object / function with
        signature ``scorer(estimator, X, y)``.

    verbose : int
        Verbosity level.

    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.

    parameters : dict
        The parameters that have been evaluated.

    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate to the shared fit-and-score helper; its third return value is
    # not part of this function's contract and is discarded.
    score, n_test, _unused = _fit_and_score(estimator, X, y, scorer, train,
                                            test, verbose, parameters,
                                            fit_params, error_score)
    return score, parameters, n_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""

    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):

        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score

    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type (classifier/regressor/...).
        return self.estimator._estimator_type

    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit

        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        Returns
        -------
        score : float

        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.

        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.

        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.transform(X)

    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.

        Only available if the underlying estimator implements
        ``inverse_transform`` and ``refit=True``.

        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        # BUG FIX: this previously delegated to ``transform`` instead of
        # ``inverse_transform``, silently applying the forward transform.
        return self.best_estimator_.inverse_transform(Xt)

    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""

        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))

        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))

        base_estimator = clone(self.estimator)

        pre_dispatch = self.pre_dispatch

        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)

        # ``out`` is a flat list over (candidate x fold) of 4-tuples:
        # (score, n_test_samples, <ignored>, parameters), grouped so that
        # the n_folds results for one candidate are contiguous.
        n_fits = len(out)
        n_folds = len(cv)

        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # Weight each fold's score by its test-set size so that
                    # the aggregate is a per-sample mean.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores

        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    Important members are fit, predict.

    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each grid point.

    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default 1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, default=3
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if estimator is a classifier
        and the target y is binary or multiclass, or the number
        of folds in KFold otherwise.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.

    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.

    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.

    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.

    """

    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
        self.param_grid = param_grid
        _check_param_grid(param_grid)

    def fit(self, X, y=None):
        """Run fit with all sets of parameters.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        """
        # Enumerate the full Cartesian product of candidate settings and
        # hand it to the shared search loop in BaseSearchCV._fit.
        grid = ParameterGrid(self.param_grid)
        return self._fit(X, y, grid)
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.

    RandomizedSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.

    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    Read more in the :ref:`User Guide <randomized_parameter_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each parameter setting.

    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.

    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default=1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, optional
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if estimator is a classifier
        and the target y is binary or multiclass, or the number
        of folds in KFold otherwise.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.

    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.

    """

    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
        # Parameters specific to randomized search.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        """
        # Draw n_iter candidate settings lazily and hand them to the shared
        # search loop in BaseSearchCV._fit.
        param_sampler = ParameterSampler(self.param_distributions,
                                         self.n_iter,
                                         random_state=self.random_state)
        return self._fit(X, y, param_sampler)
| bsd-3-clause |
erjerison/adaptability | github_submission/qtl_detection_one_trait.py | 1 | 31004 | import qtl_detection_adaptability
import numpy
import matplotlib.pylab as pt
import regression
##Updated 11-2-2016 to include a second epistasis model, 'detect_qtls_with_epistasis2'
##Updated 12-21-2016 to calculate confidence intervals based on LOD drop-off during QTL detection
##Updated 1-18-2016 to include a function for finding QTLs on the two environments separately
def detect_qtls(genotype_mat, phenotype_list_sc, phenotype_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
    """Iteratively detect QTLs using the summed LOD score over the SC and
    YPD environments.

    Each round: (1) compute per-marker LOD scores in each environment and
    sum them; (2) take the top-scoring marker; (3) compare it against a null
    distribution of maximum LOD scores from 1000 permutations of the
    segregant phenotype rows; (4) if significant, add it to the QTL list,
    regress the population-level phenotypes on all QTLs found so far, and
    repeat the search on the residuals.

    ``numpy.dot(phenotype_list, helper_matrix) / pops_per_seg`` yields
    per-segregant means, so helper_matrix is presumably an
    (n_pops x n_segs) 0/1 indicator mapping populations to their founding
    segregant -- TODO confirm against callers.

    Returns ``(current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc,
    intervals)`` where intervals is an array of [low, high] marker-index
    pairs from the LOD drop-off scans.

    NOTE(review): Python 2 code (print statements, xrange). If the very
    first peak is NOT significant, ``new_QTLs`` and the ``beta_*``
    variables are never bound, so the code below raises NameError; the
    author apparently assumes at least one significant QTL.
    """
    #Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
    n_segs = len(pops_per_seg_sc)
    # Aggregate population-level phenotypes to per-segregant first and
    # second moments, one (n_segs x 3) matrix per environment:
    # columns are [mean, second moment, n_pops].
    phenotype_means_sc = numpy.dot(phenotype_list_sc, helper_matrix_sc)/pops_per_seg_sc
    phenotype_second_moments_sc = numpy.dot(phenotype_list_sc**2, helper_matrix_sc)/pops_per_seg_sc
    phenotypes_sc = numpy.append(phenotype_means_sc.reshape((n_segs,1)), phenotype_second_moments_sc.reshape((n_segs,1)), axis=1)
    phenotypes_sc = numpy.append(phenotypes_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
    phenotype_means_ypd = numpy.dot(phenotype_list_ypd, helper_matrix_ypd)/pops_per_seg_ypd
    phenotype_second_moments_ypd = numpy.dot(phenotype_list_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
    phenotypes_ypd = numpy.append(phenotype_means_ypd.reshape((n_segs,1)), phenotype_second_moments_ypd.reshape((n_segs,1)), axis=1)
    phenotypes_ypd = numpy.append(phenotypes_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
    # Per-population genotype rows (segregant genotypes replicated per pop).
    expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
    expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
    # Joint statistic: sum of the two environments' per-marker LOD scores.
    lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_sc)
    lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_ypd)
    lods = lods_sc + lods_ypd
    top_lod = numpy.nanmax(lods)
    top_lod_idx = numpy.nanargmax(lods)
    ##Confidence intervals around this peak
    intervals = []
    relative_height_lb = top_lod
    relative_height_ub = top_lod
    lb_index = top_lod_idx
    ub_index = top_lod_idx
    consecutive_low_scores = 0
    # Walk left from the peak until 40 consecutive markers sit more than
    # 1.5 LOD below it; the interval bound is the first marker of that run.
    # NOTE(review): the second-pass scans below use 20 markers and a 2 LOD
    # drop instead -- inconsistency to confirm with the author. A peak too
    # close to the array edge would also walk out of bounds (negative
    # indices wrap) -- TODO confirm markers are padded.
    while consecutive_low_scores < 40:
        lb_index -= 1
        relative_height_lb = lods[lb_index]
        if relative_height_lb < top_lod - 1.5:
            consecutive_low_scores += 1
        else:
            consecutive_low_scores = 0
        if consecutive_low_scores == 1:
            first_consecutive_low_idx = lb_index
    consecutive_low_scores = 0
    # Same scan walking right from the peak.
    while consecutive_low_scores < 40:
        ub_index += 1
        relative_height_ub = lods[ub_index]
        if relative_height_ub < top_lod - 1.5:
            consecutive_low_scores += 1
        else:
            consecutive_low_scores = 0
        if consecutive_low_scores == 1:
            first_consecutive_high_idx = ub_index
    # Permutation null: max summed LOD under random segregant relabeling.
    bootstrapped_lods = []
    n_iter = 1000
    for i in xrange(0,n_iter):
        #print i
        #print "Generating permutation..."
        permutation = numpy.random.permutation(numpy.arange(n_segs))
        permuted_phenotype_matrix_ypd = phenotypes_ypd[permutation,:]
        permuted_phenotype_matrix_sc = phenotypes_sc[permutation,:]
        permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
        permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
        permuted_lods = permuted_lods_sc + permuted_lods_ypd
        #print permuted_lods
        bootstrapped_lods.append(numpy.nanmax(permuted_lods))
    #print bootstrapped_lods
    # 50th largest of 1000 permutation maxima, i.e. a p < .05 threshold.
    sig_threshold = numpy.sort(bootstrapped_lods)[::-1][49]
    #sig_threshold = numpy.sort(bootstrapped_lods)[::-1][99]
    # pt.plot(lods)
    # pt.plot(permuted_lods,'g')
    # pt.axhline(sig_threshold, 0, 1, 'k')
    # pt.show()
    print 'sig_threshold =', sig_threshold
    #print chromosome_peak_lod_idxs
    if top_lod > sig_threshold:
        new_QTLs = [top_lod_idx]
        intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
        print intervals
    # NOTE(review): reconstructed indentation -- if the branch above was
    # not taken, new_QTLs is unbound here (see docstring).
    current_QTLs = new_QTLs
    all_QTLs_found = False
    while all_QTLs_found ==False:
        print current_QTLs
        #Fit a linear model using the current QTL list--or a nonlinear model
        qtl_matrix_sc = expanded_genotype_mat_sc[:,current_QTLs]
        qtl_matrix_ypd = expanded_genotype_mat_ypd[:,current_QTLs]
        beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,qtl_matrix_sc)
        beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,qtl_matrix_ypd)
        # Residuals after removing the fitted effect of all current QTLs;
        # these replace the phenotypes for the next round of detection.
        residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,qtl_matrix_sc.T))
        residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,qtl_matrix_ypd.T))
        residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
        residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
        phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
        phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
        residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
        residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
        phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
        phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
        lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
        lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
        lods = lods_sc + lods_ypd
        top_lod = numpy.nanmax(lods)
        top_lod_idx = numpy.nanargmax(lods)
        # LOD drop-off interval scan, as above but with a 20-marker run
        # and a 2 LOD drop (see NOTE above).
        relative_height_lb = top_lod
        relative_height_ub = top_lod
        lb_index = top_lod_idx
        ub_index = top_lod_idx
        consecutive_low_scores = 0
        while consecutive_low_scores < 20:
            lb_index -= 1
            relative_height_lb = lods[lb_index]
            if relative_height_lb < top_lod - 2:
                consecutive_low_scores += 1
            else:
                consecutive_low_scores = 0
            if consecutive_low_scores == 1:
                first_consecutive_low_idx = lb_index
        consecutive_low_scores = 0
        while consecutive_low_scores < 20:
            ub_index += 1
            relative_height_ub = lods[ub_index]
            if relative_height_ub < top_lod - 2:
                consecutive_low_scores += 1
            else:
                consecutive_low_scores = 0
            if consecutive_low_scores == 1:
                first_consecutive_high_idx = ub_index
        # Fresh permutation null against the residual phenotypes.
        bootstrapped_lods = []
        n_iter = 1000
        for i in xrange(0,n_iter):
            permutation = numpy.random.permutation(numpy.arange(n_segs))
            permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
            permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
            permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
            permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
            permuted_lods = permuted_lods_sc + permuted_lods_ypd
            bootstrapped_lods.append(numpy.nanmax(permuted_lods))
        sig_threshold = numpy.sort(bootstrapped_lods)[-50] #p < .05
        #sig_threshold = numpy.sort(bootstrapped_lods)[-100] #p < .05
        print 'sig_threshold =', sig_threshold
        #pt.plot(lods)
        #pt.plot(permuted_lods,'g')
        #pt.axhline(sig_threshold, 0, 1, 'k')
        #pt.show()
        if top_lod > sig_threshold:
            current_QTLs.append(top_lod_idx)
            intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
        else:
            print 'all_QTLs_found'
            all_QTLs_found = True
    # Effect sizes come from the last regression performed in the loop.
    return current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc, numpy.array(intervals)
def detect_qtls_one_envt(genotype_mat, phenotype_list, helper_matrix, pops_per_seg):
    """Iteratively detect QTLs for a single environment.

    Repeats: scan LOD scores, take the top peak, test it against a
    permutation-derived significance threshold, and (if significant) add it
    to a linear model and regress it out before scanning again.

    Returns (current_QTLs, beta*betanorm, intervals) where each row of
    intervals is a [low_idx, high_idx] LOD-drop confidence interval.
    """
    #Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
    n_segs = len(pops_per_seg)
    # Per-segregant phenotype mean and second moment.
    phenotype_means = numpy.dot(phenotype_list, helper_matrix)/pops_per_seg
    phenotype_second_moments = numpy.dot(phenotype_list**2, helper_matrix)/pops_per_seg
    phenotypes = numpy.append(phenotype_means.reshape((n_segs,1)), phenotype_second_moments.reshape((n_segs,1)), axis=1)
    phenotypes = numpy.append(phenotypes, pops_per_seg.reshape((n_segs,1)), axis = 1)
    expanded_genotype_mat = numpy.dot(helper_matrix, genotype_mat)
    lods = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes)
    top_lod = numpy.nanmax(lods)
    top_lod_idx = numpy.nanargmax(lods)
    ##Confidence intervals around this peak
    intervals = []
    relative_height_lb = top_lod
    relative_height_ub = top_lod
    lb_index = top_lod_idx
    ub_index = top_lod_idx
    # Walk left from the peak until the LOD has stayed more than 1.5 below
    # the peak for 20 consecutive loci; the first locus of that run is the
    # lower bound of the confidence interval.
    consecutive_lowores = 0
    while consecutive_lowores < 20:
        lb_index -= 1
        relative_height_lb = lods[lb_index]
        if relative_height_lb < top_lod - 1.5:
            consecutive_lowores += 1
        else:
            consecutive_lowores = 0
        if consecutive_lowores == 1:
            first_consecutive_low_idx = lb_index
    # Same walk to the right for the upper bound.
    consecutive_lowores = 0
    while consecutive_lowores < 20:
        ub_index += 1
        relative_height_ub = lods[ub_index]
        if relative_height_ub < top_lod - 1.5:
            consecutive_lowores += 1
        else:
            consecutive_lowores = 0
        if consecutive_lowores == 1:
            first_consecutive_high_idx = ub_index
    # Null distribution: max LOD over label-permuted phenotypes.
    bootstrapped_lods = []
    n_iter = 1000
    for i in xrange(0,n_iter):
        #print i
        #print "Generating permutation..."
        permutation = numpy.random.permutation(numpy.arange(n_segs))
        permuted_phenotype_matrix = phenotypes[permutation,:]
        permuted_lods = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix)
        #print permuted_lods
        bootstrapped_lods.append(numpy.nanmax(permuted_lods))
    #print bootstrapped_lods
    # 50th-largest of the 1000 permuted maxima: empirical p < .05 cutoff.
    sig_threshold = numpy.sort(bootstrapped_lods)[::-1][49]
    #sig_threshold = numpy.sort(bootstrapped_lods)[::-1][99]
    # pt.plot(lods)
    # pt.plot(permuted_lods,'g')
    # pt.axhline(sig_threshold, 0, 1, 'k')
    # pt.show()
    print 'sig_threshold =', sig_threshold
    #print chromosome_peak_lod_idxs
    if top_lod > sig_threshold:
        new_QTLs = [top_lod_idx]
        intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
        print intervals
    # NOTE(review): if the first peak is NOT significant, new_QTLs is never
    # bound and the next line raises NameError -- confirm intended usage.
    current_QTLs = new_QTLs
    all_QTLs_found = False
    while all_QTLs_found ==False:
        print current_QTLs
        #Fit a linear model using the current QTL list--or a nonlinear model
        qtl_matrix = expanded_genotype_mat[:,current_QTLs]
        beta, betanorm, F = regression.ordinary_linear_regression(phenotype_list,qtl_matrix)
        residuals = phenotype_list - F(numpy.dot(beta,qtl_matrix.T))
        # Collapse residuals to per-segregant [mean, second moment, n] rows.
        residuals_means = numpy.dot(residuals, helper_matrix)/pops_per_seg
        residuals_second_moments = numpy.dot(residuals**2, helper_matrix)/pops_per_seg
        phenotypes_new = numpy.append(residuals_means.reshape((n_segs,1)),residuals_second_moments.reshape((n_segs,1)), axis=1)
        phenotypes_new = numpy.append(phenotypes_new, pops_per_seg.reshape((n_segs,1)), axis = 1)
        # Re-scan the residuals for the next-strongest peak.
        lods = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new)
        top_lod = numpy.nanmax(lods)
        top_lod_idx = numpy.nanargmax(lods)
        relative_height_lb = top_lod
        relative_height_ub = top_lod
        lb_index = top_lod_idx
        ub_index = top_lod_idx
        # Confidence interval around the new peak (same walk as above).
        consecutive_lowores = 0
        while consecutive_lowores < 20:
            lb_index -= 1
            relative_height_lb = lods[lb_index]
            if relative_height_lb < top_lod - 1.5:
                consecutive_lowores += 1
            else:
                consecutive_lowores = 0
            if consecutive_lowores == 1:
                first_consecutive_low_idx = lb_index
        consecutive_lowores = 0
        while consecutive_lowores < 20:
            ub_index += 1
            relative_height_ub = lods[ub_index]
            if relative_height_ub < top_lod - 1.5:
                consecutive_lowores += 1
            else:
                consecutive_lowores = 0
            if consecutive_lowores == 1:
                first_consecutive_high_idx = ub_index
        # Permutation null distribution for the residual scan.
        bootstrapped_lods = []
        n_iter = 1000
        for i in xrange(0,n_iter):
            permutation = numpy.random.permutation(numpy.arange(n_segs))
            permuted_phenotype_matrix = phenotypes_new[permutation,:]
            permuted_lods = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix)
            bootstrapped_lods.append(numpy.nanmax(permuted_lods))
        sig_threshold = numpy.sort(bootstrapped_lods)[-50] #p < .05
        #sig_threshold = numpy.sort(bootstrapped_lods)[-100] #p < .05
        print 'sig_threshold =', sig_threshold
        #pt.plot(lods)
        #pt.plot(permuted_lods,'g')
        #pt.axhline(sig_threshold, 0, 1, 'k')
        #pt.show()
        if top_lod > sig_threshold:
            current_QTLs.append(top_lod_idx)
            intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
        else:
            print 'all_QTLs_found'
            all_QTLs_found = True
    return current_QTLs, beta*betanorm, numpy.array(intervals)
def calculate_qtl_confidence_intervals_lods(qtl_locs, genotype_mat, phenotype_sc, phenotype_ypd, helper_matrix_sc=numpy.identity(229), helper_matrix_ypd=numpy.identity(229), pops_per_seg_sc=numpy.ones((229,)), pops_per_seg_ypd=numpy.ones((229,))):
    """Half-maximum LOD confidence intervals for previously detected QTLs.

    For each entry of qtl_locs the joint (sc + ypd) LOD peak is re-localized
    within +/-20 loci, then the interval is widened on each side until the
    LOD score falls to half the peak value.

    Returns (real_qtl_locs, intervals) with one [lb, ub] row per QTL.
    """
    #This function takes an arbitrary number of phenotypes (columns of phenotype_mat) and assumes qtls have been detected on them jointly
    #evol_env_vector records which environment populations with a given phenotype evolved, if applicable; 1=sc at 37 C, 0=ypd at 30 C.
    #Confidence intervals are calculated based on the location at which the LOD score (log-likelihood) falls to half its maximum value.
    #Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
    n_segs = len(pops_per_seg_sc)
    #n_phenotypes = len(evol_env_vector)
    n_loci = genotype_mat.shape[1]
    lod_idxs = []  # NOTE(review): never used below
    intervals = []
    real_qtl_locs = []
    ##Set up phenotype matrixes: per-segregant [mean, second moment, n] rows.
    phenotype_means_sc = numpy.dot(phenotype_sc, helper_matrix_sc)/pops_per_seg_sc
    phenotype_second_moments_sc = numpy.dot(phenotype_sc**2, helper_matrix_sc)/pops_per_seg_sc
    phenotypes_sc = numpy.append(phenotype_means_sc.reshape((n_segs,1)), phenotype_second_moments_sc.reshape((n_segs,1)), axis=1)
    phenotypes_sc = numpy.append(phenotypes_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
    phenotype_means_ypd = numpy.dot(phenotype_ypd, helper_matrix_ypd)/pops_per_seg_ypd
    phenotype_second_moments_ypd = numpy.dot(phenotype_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
    phenotypes_ypd = numpy.append(phenotype_means_ypd.reshape((n_segs,1)), phenotype_second_moments_ypd.reshape((n_segs,1)), axis=1)
    phenotypes_ypd = numpy.append(phenotypes_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
    # Joint LOD score: sum over the two environments.
    lods = numpy.zeros((n_loci,))
    lods += qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_sc)
    lods += qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_ypd)
    for qtl in qtl_locs:
        # Re-localize the peak within +/-20 loci of the reported location
        # (clipped at the left edge of the genome).
        if qtl > 20:
            real_peak = qtl - 20 + numpy.nanargmax(lods[qtl-20:qtl+20])
        else:
            real_peak = numpy.nanargmax(lods[0:qtl+20])
        peak_height = lods[real_peak]
        relative_height_lb = peak_height
        relative_height_ub = peak_height
        lb_index = real_peak
        ub_index = real_peak
        print real_peak
        print peak_height
        # Walk outward until the LOD drops to half its maximum on each side.
        while relative_height_lb > .5*peak_height:
            lb_index -= 1
            relative_height_lb = lods[lb_index]
        while relative_height_ub > .5*peak_height:
            ub_index += 1
            relative_height_ub = lods[ub_index]
        intervals.append([lb_index, ub_index])
        real_qtl_locs.append(real_peak)
    return real_qtl_locs, numpy.array(intervals)
def detect_qtls_above_fitness(genotype_mat, phenotype_list_sc, phenotype_list_ypd, initfit_list_sc, initfit_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
    """Detect QTLs jointly in two environments, controlling for initial fitness.

    Initial fitness is always a regressor; detected QTLs are added one at a
    time whenever the top summed (sc + ypd) LOD score of the residuals beats
    a permutation significance threshold.

    Returns (current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc).
    """
    #Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
    n_segs = len(pops_per_seg_sc)
    n_pops_sc = sum(pops_per_seg_sc)
    n_pops_ypd = sum(pops_per_seg_ypd)
    expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
    expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
    #symmetrize the genotype matrix (recode 0/1 alleles as -.5/+.5)
    genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
    expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
    expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
    #Initial dependent variables are initial fitnesses
    X_sc = numpy.dot(helper_matrix_sc, initfit_list_sc).reshape((n_pops_sc,1))
    X_ypd = numpy.dot(helper_matrix_ypd, initfit_list_ypd).reshape((n_pops_ypd,1))
    current_QTLs = []
    all_QTLs_found = False
    while all_QTLs_found ==False:
        print current_QTLs
        #Fit a linear model using the current QTL list--or a nonlinear model
        qtl_matrix_sc = numpy.append(X_sc, expanded_genotype_mat_sc[:,current_QTLs], axis = 1)
        qtl_matrix_ypd = numpy.append(X_ypd, expanded_genotype_mat_ypd[:,current_QTLs], axis = 1)
        beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,qtl_matrix_sc)
        beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,qtl_matrix_ypd)
        residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,qtl_matrix_sc.T))
        residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,qtl_matrix_ypd.T))
        # Collapse residuals to per-segregant [mean, second moment, n] rows.
        residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
        residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
        phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
        phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
        residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
        residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
        phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
        phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
        # Joint LOD score of the residuals, summed over environments.
        lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
        lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
        lods = lods_sc + lods_ypd
        #pt.plot(lods)
        top_lod = numpy.nanmax(lods)
        top_lod_idx = numpy.nanargmax(lods)
        #print top_lod
        # Permutation null: shuffle segregant labels, record max summed LOD.
        bootstrapped_lods = []
        n_iter = 1000
        for i in xrange(0,n_iter):
            permutation = numpy.random.permutation(numpy.arange(n_segs))
            permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
            permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
            permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
            permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
            permuted_lods = permuted_lods_sc + permuted_lods_ypd
            bootstrapped_lods.append(numpy.nanmax(permuted_lods))
        # NOTE(review): [-49] keeps the 49th-largest null maximum; sibling
        # functions use [-50] (p < .05) -- confirm which cutoff is intended.
        sig_threshold = numpy.sort(bootstrapped_lods)[-49]
        print 'sig_threshold =', sig_threshold
        #print numpy.sort(bootstrapped_lods)
        #pt.plot(permuted_lods,'g')
        #pt.axhline(sig_threshold,0,1,'k')
        #pt.show()
        if top_lod > sig_threshold:
            current_QTLs.append(top_lod_idx)
        else:
            print 'all_QTLs_found'
            all_QTLs_found = True
    return current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc
def detect_qtls_with_epistasis(genotype_mat, phenotype_list_sc, phenotype_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
    """Detect QTLs allowing for epistatic interactions with the KRE33 locus.

    Each iteration scans both for new main-effect loci and for KRE33
    interactions of loci already in the model, and adds whichever candidate
    has the higher joint (sc + ypd) LOD, until neither beats the permutation
    significance threshold.

    Returns (current_main_effect_QTLs, current_epistatic_QTLs,
    beta_ypd*betanorm_ypd, beta_sc*betanorm_sc).
    """
    #Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
    n_segs = len(pops_per_seg_sc)
    n_pops_sc = sum(pops_per_seg_sc)
    n_pops_ypd = sum(pops_per_seg_ypd)
    expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
    expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
    #symmetrize the genotype matrix (recode 0/1 alleles as -.5/+.5)
    genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
    expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
    expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
    # Column index of the KRE33 locus -- TODO confirm against the genotype data.
    kre33_loc = 9596
    kre_genotypes = genotype_mat[:,kre33_loc]
    kre_genotypes_sc = expanded_genotype_mat_sc[:,kre33_loc]
    kre_genotypes_ypd = expanded_genotype_mat_ypd[:,kre33_loc]
    current_main_effect_QTLs = []#new_QTLs
    current_epistatic_QTLs = []
    all_QTLs_found = False
    while all_QTLs_found ==False:
        print current_main_effect_QTLs
        print current_epistatic_QTLs
        #Fit a linear model using the current QTL list--or a nonlinear model
        # Design matrix: KRE33 genotype, then main-effect columns, then
        # KRE33-by-locus interaction columns.  (len(...) > .5 is simply a
        # non-empty-list check.)
        coefficient_matrix_sc = kre_genotypes_sc.reshape((n_pops_sc,1))
        if len(current_main_effect_QTLs) > .5:
            coefficient_matrix_sc = numpy.append(coefficient_matrix_sc, expanded_genotype_mat_sc[:,current_main_effect_QTLs], axis=1)
        if len(current_epistatic_QTLs) > .5:
            coefficient_matrix_sc = numpy.append(coefficient_matrix_sc, kre_genotypes_sc.reshape((n_pops_sc,1))*expanded_genotype_mat_sc[:,current_epistatic_QTLs], axis=1)
        coefficient_matrix_ypd = kre_genotypes_ypd.reshape((n_pops_ypd,1))
        if len(current_main_effect_QTLs) > .5:
            coefficient_matrix_ypd = numpy.append(coefficient_matrix_ypd, expanded_genotype_mat_ypd[:,current_main_effect_QTLs], axis=1)
        if len(current_epistatic_QTLs) > .5:
            coefficient_matrix_ypd = numpy.append(coefficient_matrix_ypd, kre_genotypes_ypd.reshape((n_pops_ypd,1))*expanded_genotype_mat_ypd[:,current_epistatic_QTLs], axis=1)
        beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,coefficient_matrix_sc)
        beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,coefficient_matrix_ypd)
        residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,coefficient_matrix_sc.T))
        residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,coefficient_matrix_ypd.T))
        # Collapse residuals to per-segregant [mean, second moment, n] rows.
        residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
        residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
        phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
        phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
        residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
        residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
        phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
        phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
        #print phenotypes_new_sc
        ##Calculate lods for new main-effect loci
        lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
        lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
        lods = lods_sc + lods_ypd
        # pt.figure()
        # pt.plot(lods)
        # pt.show()
        top_lod = numpy.nanmax(lods)
        top_lod_idx = numpy.nanargmax(lods)
        print top_lod
        ##Calculate potential epistatic effects of loci already in the model
        if len(current_main_effect_QTLs) > .5:
            genotype_mat_interactions = kre_genotypes.reshape((n_segs,1))*genotype_mat[:,current_main_effect_QTLs]
            #print genotype_mat_interactions
            lods_sc_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, phenotypes_new_sc)
            lods_ypd_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, phenotypes_new_ypd)
            lods_interactions = lods_sc_ints + lods_ypd_ints
            top_lod_int = numpy.nanmax(lods_interactions)
            top_lod_int_idx = current_main_effect_QTLs[numpy.nanargmax(lods_interactions)]
            print top_lod_int
        else:
            top_lod_int = 0
        # Permutation null over both the main-effect and interaction scans.
        bootstrapped_lods = []
        n_iter = 1000
        for i in xrange(0,n_iter):
            permutation = numpy.random.permutation(numpy.arange(n_segs))
            permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
            permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
            permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
            permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
            permuted_lods = permuted_lods_sc + permuted_lods_ypd
            if len(current_main_effect_QTLs) > .5:
                permuted_lods_sc_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, permuted_phenotype_matrix_sc)
                permuted_lods_ypd_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, permuted_phenotype_matrix_ypd)
                permuted_lods_interactions = permuted_lods_sc_ints + permuted_lods_ypd_ints
                all_permuted_lods = numpy.append(permuted_lods, permuted_lods_interactions)
            else:
                all_permuted_lods = permuted_lods
            bootstrapped_lods.append(numpy.nanmax(all_permuted_lods))
        # NOTE(review): [-49] keeps the 49th-largest null maximum; sibling
        # functions use [-50] (p < .05) -- confirm which cutoff is intended.
        sig_threshold = numpy.sort(bootstrapped_lods)[-49]
        print 'sig_threshold =', sig_threshold
        if (top_lod > sig_threshold or top_lod_int > sig_threshold):
            # NOTE(review): an exact tie top_lod == top_lod_int appends
            # neither locus, so the loop would repeat unchanged -- confirm
            # ties cannot occur.
            if top_lod > top_lod_int:
                current_main_effect_QTLs.append(top_lod_idx)
            elif top_lod_int > top_lod:
                current_epistatic_QTLs.append(top_lod_int_idx)
        else:
            print 'all_QTLs_found'
            all_QTLs_found = True
    return current_main_effect_QTLs, current_epistatic_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc
def detect_qtls_with_epistasis2(genotype_mat, phenotype_list_sc, initfit_list_sc, phenotype_list_ypd, initfit_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
n_pops_sc = sum(pops_per_seg_sc)
n_pops_ypd = sum(pops_per_seg_ypd)
expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
#symmetrize the genotype matrix
genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
expanded_genotype_mat_sc = expanded_genotype_mat_sc - .5
expanded_genotype_mat_ypd = expanded_genotype_mat_ypd - .5
#expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
#expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
kre33_loc = 9596
kre_genotypes = genotype_mat[:,kre33_loc]
kre_genotypes_sc = expanded_genotype_mat_sc[:,kre33_loc]
kre_genotypes_ypd = expanded_genotype_mat_ypd[:,kre33_loc]
current_QTLs = [] #new_QTLs
#At each step we are going to fit the model: delta_X = a + bX + c*kre_genotypes + sum_i=1^n_qtls d_i1*kre_genotypes*g_i + d_i2*(1-kre_genotypes)*g_i
#At the final step, we will fit the full model and determine if all the coefficients are significant.
##Initialize: fit delta_X = a + bX + c*kre_genotypes
X_sc = numpy.concatenate((numpy.dot(helper_matrix_sc, initfit_list_sc).reshape((n_pops_sc,1)), kre_genotypes_sc.reshape((n_pops_sc,1)), numpy.ones((n_pops_sc,1))), axis = 1)
X_ypd = numpy.concatenate((numpy.dot(helper_matrix_ypd, initfit_list_ypd).reshape((n_pops_ypd,1)), kre_genotypes_ypd.reshape((n_pops_ypd,1)),numpy.ones((n_pops_ypd,1))), axis = 1)
all_QTLs_found = False
while all_QTLs_found ==False:
#If this is not the first iteration, add the (potentially epistatic) qtls to the model
if len(current_QTLs) > .5:
qtl_mat_sc = expanded_genotype_mat_sc[:, current_QTLs]
qtl_mat_ypd = expanded_genotype_mat_ypd[:, current_QTLs]
X_sc_temp = numpy.concatenate((X_sc, qtl_mat_sc, qtl_mat_sc*kre_genotypes_sc.reshape((n_pops_sc,1))), axis=1)
X_ypd_temp = numpy.concatenate((X_ypd, qtl_mat_ypd, qtl_mat_ypd*kre_genotypes_ypd.reshape((n_pops_ypd,1))), axis=1)
#print X_sc_temp.shape
else:
X_sc_temp = X_sc
X_ypd_temp = X_ypd
#Calculate residuals:
beta_sc = numpy.dot(numpy.linalg.inv(numpy.dot(X_sc_temp.T, X_sc_temp)), numpy.dot(X_sc_temp.T, phenotype_list_sc))
residuals_sc = phenotype_list_sc - numpy.dot(X_sc_temp, beta_sc) #check dot product direction
beta_ypd = numpy.dot(numpy.linalg.inv(numpy.dot(X_ypd_temp.T, X_ypd_temp)), numpy.dot(X_ypd_temp.T, phenotype_list_ypd))
residuals_ypd = phenotype_list_ypd - numpy.dot(X_ypd_temp, beta_ypd)
residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
residual_mat_sc = numpy.concatenate((residuals_means_sc.reshape((n_segs,1)), residuals_second_moments_sc.reshape((n_segs,1)), pops_per_seg_sc.reshape((n_segs,1))), axis=1)
residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
residual_mat_ypd = numpy.concatenate((residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), pops_per_seg_ypd.reshape((n_segs,1))), axis=1)
##Calculate lods for new loci
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, residual_mat_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, residual_mat_ypd)
lods = lods_sc + lods_ypd
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
##Bootstrap over segregants
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = residual_mat_ypd[permutation,:]
permuted_phenotype_matrix_sc = residual_mat_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-49]
print 'sig_threshold =', sig_threshold
#print numpy.sort(bootstrapped_lods)
#pt.plot(permuted_lods,'g')
#pt.plot(lods,'b')
#pt.axhline(sig_threshold,0,1,'k')
#pt.show()
if top_lod > sig_threshold:
current_QTLs.append(top_lod_idx)
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_QTLs, beta_sc, beta_ypd | mit |
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample: six linearly separable 2-D points with labels -1/+1
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# shared RNG so the dataset permutations below are reproducible
rng = np.random.RandomState(0)

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
    """Fit a small GBM on the toy sample and verify its basic behaviour."""
    est = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                     random_state=1, presort=presort)
    # predicting before fitting must raise
    assert_raises(ValueError, est.predict, T)
    est.fit(X, y)
    assert_array_equal(est.predict(T), true_result)
    assert_equal(10, len(est.estimators_))
    # the training deviance should decrease at some stage
    deltas = est.train_score_[:-1] - est.train_score_[1:]
    assert_true(np.any(deltas >= 0.0))
    assert_equal(est.apply(X).shape, (6, 10, 1))
def test_classification_toy():
    """Yield one toy-classification check per presort/loss combination."""
    for presort in ('auto', True, False):
        for loss in ('deviance', 'exponential'):
            yield check_classification_toy, presort, loss
def test_parameter_checks():
    # Check input parameter validation.
    # invalid n_estimators
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=-1).fit, X, y)
    # invalid learning_rate
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
    # unknown loss name
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='foobar').fit, X, y)
    # invalid min_samples_split
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
    # invalid min_samples_leaf
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
    # invalid min_weight_fraction_leaf
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
                  X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
                  X, y)
    # invalid subsample
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=1.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=-0.1).fit, X, y)
    # invalid max_depth
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=0).fit, X, y)
    # init must be an estimator, not an arbitrary object
    assert_raises(ValueError,
                  GradientBoostingClassifier(init={}).fit, X, y)
    # test fit before feature importance
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)
    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Losses of the wrong estimator family must be rejected at fit time."""
    # regression-only losses on a classifier
    for loss in ('ls', 'lad', 'quantile', 'huber'):
        assert_raises(ValueError,
                      GradientBoostingClassifier(loss=loss).fit, X, y)
    # classification-only losses on a regressor
    for loss in ('deviance', 'exponential'):
        assert_raises(ValueError,
                      GradientBoostingRegressor(loss=loss).fit, X, y)
def check_classification_synthetic(presort, loss):
    # Test GradientBoostingClassifier on synthetic dataset used by
    # Hastie et al. in ESLII Example 12.7.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    # 100 stumps on the full sample: expect test error below 9%
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, random_state=0)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.09)
    # stochastic variant (subsample=0.5, 200 stages): expect error below 8%
    gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, subsample=0.5,
                                      random_state=0,
                                      presort=presort)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.08)
def test_classification_synthetic():
    """Yield one synthetic-classification check per presort/loss pair."""
    for presort in ('auto', True, False):
        for loss in ('deviance', 'exponential'):
            yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
    # Check consistency on dataset boston house prices with least squares
    # and least absolute deviation.
    ones = np.ones(len(boston.target))
    last_y_pred = None
    # constant sample weights (None, 1s, 2s) must not change the predictions
    for sample_weight in None, ones, 2 * ones:
        clf = GradientBoostingRegressor(n_estimators=100,
                                        loss=loss,
                                        max_depth=4,
                                        subsample=subsample,
                                        min_samples_split=2,
                                        random_state=1,
                                        presort=presort)
        # predicting before fitting must raise
        assert_raises(ValueError, clf.predict, boston.data)
        clf.fit(boston.data, boston.target,
                sample_weight=sample_weight)
        leaves = clf.apply(boston.data)
        assert_equal(leaves.shape, (506, 100))
        y_pred = clf.predict(boston.data)
        mse = mean_squared_error(boston.target, y_pred)
        assert_less(mse, 6.0)
        if last_y_pred is not None:
            # predictions are invariant under uniform reweighting
            assert_array_almost_equal(last_y_pred, y_pred)
        last_y_pred = y_pred
def test_boston():
    """Yield one boston check per presort/loss/subsample combination."""
    for presort in ('auto', True, False):
        for loss in ('ls', 'lad', 'huber'):
            for subsample in (1.0, 0.5):
                yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
    """Fit on iris and check the score and the shape of apply()."""
    est = GradientBoostingClassifier(n_estimators=100,
                                     loss='deviance',
                                     random_state=1,
                                     subsample=subsample,
                                     presort=presort)
    est.fit(iris.data, iris.target, sample_weight=sample_weight)
    assert_greater(est.score(iris.data, iris.target), 0.9)
    assert_equal(est.apply(iris.data).shape, (150, 100, 3))
def test_iris():
    """Yield one iris check per presort/subsample/sample_weight combination."""
    ones = np.ones(len(iris.target))
    for presort in ('auto', True, False):
        for subsample in (1.0, 0.5):
            for sample_weight in (None, ones):
                yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
    # Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}
    # Friedman1: default-parameter model, expect MSE below 5
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        clf = GradientBoostingRegressor(presort=presort)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 5.0)
    # Friedman2: tuned parameters, expect MSE below 1700
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 1700.0)
    # Friedman3: tuned parameters, expect MSE below 0.015
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 0.015)
def test_feature_importances():
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)
    for presort in True, False:
        clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                        min_samples_split=2, random_state=1,
                                        presort=presort)
        clf.fit(X, y)
        assert_true(hasattr(clf, 'feature_importances_'))
        # XXX: Remove this test in 0.19 after transform support to estimators
        # is removed.
        X_new = assert_warns(
            DeprecationWarning, clf.transform, X, threshold="mean")
        assert_less(X_new.shape[1], X.shape[1])
        # transform(threshold="mean") keeps the features whose importance
        # exceeds the mean importance
        feature_mask = (
            clf.feature_importances_ > clf.feature_importances_.mean())
        assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
    """predict_proba yields valid probabilities that agree with predict()."""
    est = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # not fitted yet
    assert_raises(ValueError, est.predict_proba, T)
    est.fit(X, y)
    assert_array_equal(est.predict(T), true_result)
    proba = est.predict_proba(T)
    # probabilities lie in [0, 1]
    assert_true(np.all(proba >= 0.0))
    assert_true(np.all(proba <= 1.0))
    # argmax over the probabilities reproduces the class predictions
    derived = est.classes_.take(proba.argmax(axis=1), axis=0)
    assert_array_equal(derived, true_result)
def test_check_inputs():
    """Mismatched y or sample_weight lengths must raise ValueError."""
    est = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, est.fit, X, y + [0, 1])
    est = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, est.fit, X, y,
                  sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
    """predict must reject arrays whose shape does not match training X."""
    bad_inputs = (np.array([1.0, 2.0])[:, np.newaxis],
                  np.array([[]]),
                  np.array([1.0, 2.0, 3.0])[:, np.newaxis])
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    for bad in bad_inputs:
        assert_raises(ValueError, clf.predict, bad)
    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, rng.rand(len(X)))
    for bad in bad_inputs:
        assert_raises(ValueError, reg.predict, bad)
def test_check_max_features():
    """Out-of-range max_features values must be rejected at fit time."""
    for bad_value in (0, len(X[0]) + 1, -0.1):
        est = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                        max_features=bad_value)
        assert_raises(ValueError, est.fit, X, y)
def test_max_feature_regression():
    """Regression test: random_state must be wired through max_features."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    # max_features_ must be resolved correctly for string and float
    # specifications (each fit is independent, so a table-driven loop
    # is equivalent to the original sequence of fits).
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train, y_train = X[:2000], y[:2000]

    cases = [
        # (estimator class, max_features spec, expected max_features_)
        (GradientBoostingClassifier, 'auto', int(np.sqrt(n_features))),
        (GradientBoostingRegressor, 'auto', n_features),
        (GradientBoostingRegressor, 0.3, int(n_features * 0.3)),
        (GradientBoostingRegressor, 'sqrt', int(np.sqrt(n_features))),
        (GradientBoostingRegressor, 'log2', int(np.log2(n_features))),
        # a tiny fraction still resolves to at least one feature
        (GradientBoostingRegressor, 0.01 / X.shape[1], 1),
    ]
    for Estimator, max_features, expected in cases:
        gbrt = Estimator(n_estimators=1, max_features=max_features)
        gbrt.fit(X_train, y_train)
        assert_equal(gbrt.max_features_, expected)
def test_staged_predict():
    # The final element of staged_predict must equal predict.
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()

    # consuming the staged generator before fit raises ValueError
    assert_raises(ValueError, lambda data: np.fromiter(
        clf.staged_predict(data), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # every stage has the same output shape ...
    for staged in clf.staged_predict(X_test):
        assert_equal(staged.shape, y_pred.shape)
    # ... and after the loop, ``staged`` holds the final stage
    assert_array_equal(y_pred, staged)
def test_staged_predict_proba():
    # The final stage of staged_predict / staged_predict_proba must agree
    # with predict / predict_proba.
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)

    # consuming the staged generator before fit raises NotFittedError
    assert_raises(NotFittedError, lambda data: np.fromiter(
        clf.staged_predict_proba(data), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)

    for staged_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, staged_pred.shape)
    # last yielded value is the final-stage prediction
    assert_array_equal(clf.predict(X_test), staged_pred)

    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    # last yielded value is the final-stage probability estimate
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
    # Staged prediction methods must yield defensive copies: mutating
    # one yielded array must not corrupt another.
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(np.int) + 1  # don't predict zeros

    for estimator in [GradientBoostingRegressor(),
                      GradientBoostingClassifier()]:
        estimator.fit(X, y)
        for func in ['predict', 'decision_function', 'predict_proba']:
            staged_func = getattr(estimator, "staged_" + func, None)
            if staged_func is None:
                # regressor has no staged_predict_proba
                continue
            with warnings.catch_warnings(record=True):
                staged_result = list(staged_func(X))
                # zero out the second stage; the first must be unaffected
                staged_result[1][:] = 0
                assert_true(np.all(staged_result[0] != 0))
def test_serialization():
    # A fitted model must survive a pickle round-trip unchanged.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    try:
        # Python 2: prefer the C implementation when available
        import cPickle as pickle
    except ImportError:
        import pickle

    buf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(buf)
    # the deserialized model must predict identically
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
    # All-identical targets: classification must fail (single class),
    # regression must fit a constant model.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))

    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, np.ones(len(X)))
    reg.predict([rng.rand(2)])
    # any input predicts the constant target 1.0
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       reg.predict([rng.rand(2)]))
def test_quantile_loss():
    # Quantile loss with alpha=0.5 is equivalent to least absolute
    # deviation ('lad').
    common = dict(n_estimators=100, max_depth=4, random_state=7)

    clf_quantile = GradientBoostingRegressor(loss='quantile', alpha=0.5,
                                             **common)
    clf_quantile.fit(boston.data, boston.target)
    y_quantile = clf_quantile.predict(boston.data)

    clf_lad = GradientBoostingRegressor(loss='lad', **common)
    clf_lad.fit(boston.data, boston.target)
    y_lad = clf_lad.predict(boston.data)

    assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
    # String (non-integer) class labels must be handled transparently.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    symbol_y = tosequence(map(str, y))
    clf.fit(X, symbol_y)

    expected = tosequence(map(str, true_result))
    assert_array_equal(clf.predict(T), expected)
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    # Float class labels must round-trip through fit/predict.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, np.asarray(y, dtype=np.float32))

    expected = np.asarray(true_result, dtype=np.float32)
    assert_array_equal(clf.predict(T), expected)
    assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # A column-vector y triggers a DataConversionWarning but still fits.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_col = np.asarray(y, dtype=np.int32)[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, clf.fit, X, y_col)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    # C- vs Fortran-ordered X and y must yield identical models.
    for X_ in (np.asfortranarray(X), np.ascontiguousarray(X)):
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X_, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))

    for order_fn in (np.ascontiguousarray, np.asfortranarray):
        y_ = order_fn(np.asarray(y, dtype=np.int32))
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X, y_)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    # Test if oob improvement has correct shape and regression test.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    # one OOB improvement value per boosting stage
    assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)
def test_oob_improvement_raise():
    # With subsample=1.0 there is no out-of-bag set, so accessing
    # oob_improvement_ must raise AttributeError.
    est = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    est.fit(X, y)
    assert_raises(AttributeError, lambda: est.oob_improvement_)
def test_oob_multilcass_iris():
    # Check OOB improvement on multi-class dataset.
    # NOTE(review): "multilcass" is a typo for "multiclass" in the function
    # name; kept as-is to avoid changing the public test identifier.
    clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                     random_state=1, subsample=0.5)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9)
    # one OOB improvement value per boosting stage
    assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    # Check verbose=1 does not cause error.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture everything written to stdout during fit
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB: subsample < 1 adds the "OOB Improve" column
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100 (every 10th iteration is logged)
    assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
    # Check verbose=2 does not cause error.
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture everything written to stdout during fit
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB column: subsample defaults to 1.0 here
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100 (verbose=2 logs every iteration)
    assert_equal(100, n_lines)
def test_warm_start():
    # Growing an ensemble via warm_start must match a single cold fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        cold = Estimator(n_estimators=200, max_depth=1)
        cold.fit(X, y)

        warm = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=200)
        warm.fit(X, y)  # adds the remaining 100 stages
        assert_array_almost_equal(warm.predict(X), cold.predict(X))
def test_warm_start_n_estimators():
    # Raising n_estimators and refitting with warm_start must match a
    # single cold fit of that final size.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        cold = Estimator(n_estimators=300, max_depth=1)
        cold.fit(X, y)

        warm = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=300)
        warm.fit(X, y)  # adds the remaining 200 stages
        assert_array_almost_equal(warm.predict(X), cold.predict(X))
def test_warm_start_max_depth():
    # Trees added during a warm start may use a different max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=110, max_depth=2)
        model.fit(X, y)

        # original trees keep depth 1; the 10 new ones have depth 2
        assert_equal(model.estimators_[0, 0].max_depth, 1)
        for i in range(1, 11):
            assert_equal(model.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
    # Disabling warm_start again must clear the previously fitted state.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        reference = Estimator(n_estimators=100, max_depth=1)
        reference.fit(X, y)

        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)  # initializes state
        model.set_params(warm_start=False)
        model.fit(X, y)  # clears old state; must equal the reference
        assert_array_almost_equal(model.predict(X), reference.predict(X))
def test_warm_start_zero_n_estimators():
    # Warm starting towards zero estimators is invalid and must raise.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=0)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_smaller_n_estimators():
    # Shrinking n_estimators during a warm start is invalid and must raise.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=99)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_equal_n_estimators():
    # Refitting with unchanged n_estimators must leave predictions intact.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = Estimator(n_estimators=100, max_depth=1)
        model.fit(X, y)

        model2 = clone(model)
        model2.set_params(n_estimators=model.n_estimators, warm_start=True)
        model2.fit(X, y)  # no-op: nothing new to add
        assert_array_almost_equal(model2.predict(X), model.predict(X))
def test_warm_start_oob_switch():
    # Subsampling (and hence OOB scoring) can be enabled while growing a
    # warm-started ensemble.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        model = Estimator(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        model.set_params(n_estimators=110, subsample=0.5)
        model.fit(X, y)

        # stages fitted without subsampling report zero OOB improvement
        assert_array_equal(model.oob_improvement_[:100], np.zeros(100))
        # the ten new stages have genuine (non-zero) OOB improvements
        assert_array_equal(model.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
    # OOB improvements of a warm-started fit must match a cold fit for
    # the stages both have in common.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Estimator in (GradientBoostingRegressor, GradientBoostingClassifier):
        cold = Estimator(n_estimators=200, max_depth=1, subsample=0.5,
                         random_state=1)
        cold.fit(X, y)

        warm = Estimator(n_estimators=100, max_depth=1, subsample=0.5,
                         random_state=1, warm_start=True)
        warm.fit(X, y)
        warm.set_params(n_estimators=200)
        warm.fit(X, y)

        assert_array_almost_equal(warm.oob_improvement_[:100],
                                  cold.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests early stopping at the 10th iteration.

    ``fit`` stops as soon as the monitor returns True; ``i`` is 0-based,
    so iteration index 9 is the 10th boosting stage.
    """
    return i == 9
def test_monitor_early_stopping():
    # Test if monitor return value works.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
        # the monitor stops fitting after 10 stages (see
        # early_stopping_monitor above)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit: without warm_start, fit starts from scratch and runs
        # all 30 stages (no monitor passed this time)
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)
        # same scenario, but starting from a warm_start estimator
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
                  warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit with warm_start disabled: full 30-stage cold fit
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # With max_leaf_nodes=k+1 and unbounded depth, greedy trees reach
    # depth k and have exactly k + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4

    est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                     random_state=1, max_leaf_nodes=k + 1)
    est.fit(X, y)

    tree = est.estimators_[0, 0].tree_
    assert_equal(tree.max_depth, k)
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert_equal(n_leaves, k + 1)
def test_complete_regression():
    # With max_leaf_nodes=k+1 and unbounded depth, greedy regression
    # trees have exactly k + 1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    k = 4

    est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                    random_state=1, max_leaf_nodes=k + 1)
    est.fit(boston.data, boston.target)

    tree = est.estimators_[-1, 0].tree_
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert_equal(n_leaves, k + 1)
def test_zero_estimator_reg():
    # init=ZeroEstimator() and init='zero' must behave identically for
    # regression; an unknown init string must raise.
    for init in (ZeroEstimator(), 'zero'):
        est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                        random_state=1, init=init)
        est.fit(boston.data, boston.target)
        y_pred = est.predict(boston.data)
        mse = mean_squared_error(boston.target, y_pred)
        assert_almost_equal(mse, 33.0, decimal=0)

    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    # init=ZeroEstimator() and init='zero' must work for multiclass and
    # binary classification; an unknown init string must raise.
    X = iris.data
    y = np.array(iris.target)

    for init in (ZeroEstimator(), 'zero'):
        est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                         random_state=1, init=init)
        est.fit(X, y)
        assert_greater(est.score(X, y), 0.96)

    # collapse iris to a binary problem and repeat with init='zero'
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)

    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # max_leaf_nodes takes precedence over max_depth when both are given.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for GBEstimator in (GradientBoostingRegressor,
                        GradientBoostingClassifier):
        # max_leaf_nodes wins: trees grow deeper than max_depth=1
        est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(est.estimators_[0, 0].tree_.max_depth, 1)

        # without max_leaf_nodes, max_depth is honored
        est = GBEstimator(max_depth=1).fit(X, y)
        assert_equal(est.estimators_[0, 0].tree_.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    # Refitting with warm_start and unchanged n_estimators must not grow
    # the ensemble.  Regression test for #3513.
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    for _ in range(2):
        clf.fit([[0, 1], [2, 3]], [0, 1])
        assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
    # predict_proba with exponential loss: probabilities must be valid
    # and consistent with the decision function.
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)
    # calling predict_proba before fit raises
    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # probabilities must lie in [0, 1]
    y_proba = clf.predict_proba(T)
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))

    # P(y=1) = 1 / (1 + exp(-2 * decision_function))
    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1],
                              1.0 / (1.0 + np.exp(-2 * score)))

    # argmax over the probabilities reproduces the predicted labels
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    # Zero-weighted samples must not influence the fitted regressor:
    # with the first two samples ignored, [1, 0] should map to a high
    # prediction (the remaining [1, 0] sample has target 1).
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('huber', 'ls', 'lad', 'quantile'):
        gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
                                       loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero-weighted samples must not influence the fitted classifier.

    The first two training samples get weight 0, so the remaining two
    samples determine the fit and ``[1, 0]`` must be classified as 1.
    """
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # BUG FIX: ``loss`` was previously not forwarded to the
        # estimator, so only the default loss was ever exercised here.
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
    """Check that sparse and dense inputs yield identical models.

    Fits ``EstimatorClass`` on dense X, on sparse X with presort=False,
    and on sparse X with presort='auto', then compares apply/predict
    outputs and feature importances across the three fits.
    """
    dense = EstimatorClass(n_estimators=10, random_state=0,
                           max_depth=2).fit(X, y)
    sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                            presort=False).fit(X_sparse, y)
    auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                          presort='auto').fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              dense.feature_importances_)

    assert_array_almost_equal(sparse.apply(X), auto.apply(X))
    assert_array_almost_equal(sparse.predict(X), auto.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              auto.feature_importances_)

    # BUG FIX: ``EstimatorClass`` is a class object, so the previous
    # ``isinstance(EstimatorClass, GradientBoostingClassifier)`` check was
    # always False and the probability comparisons below never ran.
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
        assert_array_almost_equal(sparse.predict_proba(X),
                                  auto.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
    # Run check_sparse_input for every estimator / sparse-format pair.
    estimators = (GradientBoostingClassifier, GradientBoostingRegressor)
    sparse_formats = (csr_matrix, csc_matrix, coo_matrix)

    y, X = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50,
                                                   n_features=1,
                                                   n_classes=20)
    y = y[:, 0]  # keep a single binary target

    for EstimatorClass, to_sparse in product(estimators, sparse_formats):
        yield check_sparse_input, EstimatorClass, X, to_sparse(X), y
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 103 | 41083 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
# Module-level fixtures shared by all tests below.
rng = np.random.RandomState(0)

# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]

# scipy sparse formats exercised by the sparse-input tests
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)

# neighbor-search algorithms and Minkowski p values covered by the tests
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)

# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    # All algorithms must agree with each other, and kneighbors with and
    # without distances must return the same indices.
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)

    for p in P:
        indices_only = []
        dist_and_ind = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            indices_only.append(neigh.kneighbors(test,
                                                 return_distance=False))
            dist_and_ind.append(neigh.kneighbors(test, return_distance=True))

        for i in range(len(dist_and_ind) - 1):
            # indices agree with/without distances
            assert_array_almost_equal(indices_only[i], dist_and_ind[i][1])
            # consecutive algorithms agree on distances and indices
            assert_array_almost_equal(dist_and_ind[i][0],
                                      dist_and_ind[i + 1][0])
            assert_array_almost_equal(dist_and_ind[i][1],
                                      dist_and_ind[i + 1][1])
def test_unsupervised_inputs():
    # NearestNeighbors can be fitted on a raw array, a BallTree, a
    # KDTree, or another fitted NearestNeighbors instance.
    X = rng.random_sample((10, 3))
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)

    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    # note: the loop variable is deliberately not called ``input`` to
    # avoid shadowing the builtin
    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(data)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    # Test unsupervised radius-based query: all algorithms must return
    # the same neighbor sets (after sorting each query's results).
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                # sort each query's neighbors in-place by distance
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            # indices agree with and without distances
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            # NOTE(review): the trailing comma on the next statement makes
            # it a one-element tuple expression; harmless but spurious.
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0]))),
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    # Test k-neighbors classification: slightly perturbed training points
    # must be assigned their own label, for every algorithm and weighting
    # scheme, with both integer and string labels.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # label is 1 inside the unit-ish ball, 0 outside
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X, y)
            # tiny perturbation keeps each point nearest to itself
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Test prediction with y_str
            knn.fit(X, y_str)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    # k-NN classification must accept float-typed class labels.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)

    knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X, y.astype(np.float))
    # tiny perturbation keeps each point nearest to itself
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    assert_array_equal(knn.predict(X[:n_test_pts] + epsilon),
                       y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    # Test KNeighborsClassifier.predict_proba() method
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    # columns follow sorted class order: [1, 4, 5]
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    # Test radius-based classification: slightly perturbed training
    # points must keep their own label, for every algorithm and
    # weighting scheme, with integer and string labels.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # label is 1 inside the unit-ish ball, 0 outside
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                        weights=weights,
                                                        algorithm=algorithm)
            neigh.fit(X, y)
            # tiny perturbation keeps each point within its own radius
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # same check with string labels
            neigh.fit(X, y_str)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    """Radius-based classifier behavior when a query has no neighbors.

    With outlier_label=None prediction must raise an informative
    ValueError; with a concrete outlier_label the outlying query must
    receive that label.
    """
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1

    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier

    weight_func = _weight_func
    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    # BUG FIX: this branch was dead code ("elif False:"),
                    # so labeled-outlier prediction was never verified.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    # Query points with no neighbors within the radius must receive the
    # configured outlier_label (-1).
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1

    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    expected1 = np.array([1, 2])
    expected2 = np.array([1, -1])

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(expected1, clf.predict(z1))
            assert_array_equal(expected2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    # A query at distance zero from a training sample must predict that
    # sample's class (no division-by-zero issues in the weighting).
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.0, 2.0]])  # second query is exact match
    expected = np.array([1, 2])

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            assert_array_equal(expected, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
    # Regressors must handle zero distances (exact matches) gracefully.
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    radius = 0.2
    z = np.array([[1.1, 1.1], [2.0, 2.0]])

    rnn_expected = np.array([1.25, 2.0])
    knn_expected = {'uniform': np.array([1.25, 1.0]),
                    'distance': np.array([1.25, 2.0])}

    for algorithm in ALGORITHMS:
        # _weight_func is deliberately excluded: users are expected to
        # handle zero distances themselves in custom weight functions.
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_expected, rnn.predict(z))

        for weights in ['uniform', 'distance']:
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(knn_expected[weights], knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0
    for algorithm in ALGORITHMS:
        nbrs = neighbors.NearestNeighbors(radius=radius,
                                          algorithm=algorithm).fit(X)
        results = nbrs.radius_neighbors([0.0], return_distance=False)
        # one query point -> 1-d object array, not a 2-d array
        assert_equal(results.shape, (1,))
        assert_equal(results.dtype, object)
        # the point at distance exactly 3.0 is included; 3.01 is not
        assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    """Radius-NN classifier: multioutput predictions equal stacked outputs."""
    rng = check_random_state(0)
    n_features, n_samples, n_output = 2, 40, 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weight_options = [None, 'uniform', 'distance', _weight_func]

    for algorithm, weights in product(ALGORITHMS, weight_options):
        # reference: one single-output classifier per target column
        single_preds = []
        for col in range(n_output):
            clf = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X_train, y_train[:, col])
            single_preds.append(clf.predict(X_test))
        y_pred_so = np.vstack(single_preds).T
        assert_equal(y_pred_so.shape, y_test.shape)

        # multioutput prediction in a single estimator
        clf_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        clf_mo.fit(X_train, y_train)
        y_pred_mo = clf_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    """k-NN classification works with every supported sparse matrix type."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2  # sparsify by zeroing out small entries
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)

    for sparsemat in SPARSE_TYPES:
        clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        clf.fit(sparsemat(X), y)
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_TYPES + (np.asarray,):
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            assert_array_equal(clf.predict(X_eps), y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    """k-NN classifier: multioutput predict/predict_proba equal stacked outputs."""
    rng = check_random_state(0)
    n_features, n_samples, n_output = 5, 50, 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weight_options = [None, 'uniform', 'distance', _weight_func]

    for algorithm, weights in product(ALGORITHMS, weight_options):
        # reference: fit one single-output classifier per target column
        single_preds, single_probas = [], []
        for col in range(n_output):
            clf = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            clf.fit(X_train, y_train[:, col])
            single_preds.append(clf.predict(X_test))
            single_probas.append(clf.predict_proba(X_test))
        y_pred_so = np.vstack(single_preds).T
        assert_equal(y_pred_so.shape, y_test.shape)
        assert_equal(len(single_probas), n_output)

        # the multioutput estimator must reproduce the stacked reference
        clf_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        clf_mo.fit(X_train, y_train)
        y_pred_mo = clf_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)

        # probabilities must match output-by-output as well
        y_proba_mo = clf_mo.predict_proba(X_test)
        assert_equal(len(y_proba_mo), n_output)
        for proba_mo, proba_so in zip(y_proba_mo, single_probas):
            assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    """k-NN regression approximates a smooth radial target function."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]

    for algorithm in ALGORITHMS:
        for weights in ('uniform', 'distance', _weight_func):
            model = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                  weights=weights,
                                                  algorithm=algorithm)
            model.fit(X, y)
            # perturb the training points slightly before querying
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = model.predict(X[:n_test_pts] + epsilon)
            assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    """Uniform-weight k-NN regression equals the mean over neighbors."""
    rng = check_random_state(0)
    n_features, n_samples, n_output = 5, 40, 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        knn = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        knn.fit(X_train, y_train)
        # reference: plain average of the neighbors' targets
        neigh_idx = knn.kneighbors(X_test, return_distance=False)
        expected = np.array([y_train[idx].mean(axis=0)
                             for idx in neigh_idx])
        y_pred = knn.predict(X_test)
        assert_equal(y_pred.shape, y_test.shape)
        assert_equal(expected.shape, y_test.shape)
        assert_array_almost_equal(y_pred, expected)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    """k-NN multi-output regression tracks a duplicated radial target."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T  # duplicate the target into two output columns
    y_target = y[:n_test_pts]

    for algorithm, weights in product(
            ALGORITHMS, ['uniform', 'distance', _weight_func]):
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        knn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = knn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    """Radius-based regression approximates a smooth radial target."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]

    for algorithm in ALGORITHMS:
        for weights in ('uniform', 'distance', _weight_func):
            model = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                       weights=weights,
                                                       algorithm=algorithm)
            model.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = model.predict(X[:n_test_pts] + epsilon)
            # the prediction error is bounded by half the query radius
            assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    """Multi-output radius-NN regression with uniform weights.

    The prediction must equal the plain mean of the targets of all
    neighbours within the radius, for every algorithm backend.
    """
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        # fixed: there was a stray space in `neighbors. RadiusNeighbors...`
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)

        # reference prediction: mean target over each neighbourhood
        # (the redundant second np.array() wrapping was removed)
        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred = rnn.predict(X_test)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    """Radius-NN multi-output regression with various weightings."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T  # duplicate target into two output columns
    y_target = y[:n_test_pts]

    for algorithm, weights in product(
            ALGORITHMS, ['uniform', 'distance', _weight_func]):
        # NOTE(review): n_neighbors is forwarded to the constructor even
        # though this is a radius-based estimator — presumably accepted by
        # the shared base class; confirm it is intentional.
        rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = rnn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    """k-NN regression accepts sparse training and query matrices."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .25).astype(np.int)

    for sparsemat in SPARSE_TYPES:
        model = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                              algorithm='auto')
        model.fit(sparsemat(X), y)
        for sparsev in SPARSE_OR_DENSE:
            query = sparsev(X)
            accuracy = np.mean(model.predict(query).round() == y)
            assert_true(accuracy > 0.95)
def test_neighbors_iris():
    """Sanity checks of k-NN classifier and regressor on the iris dataset."""
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        # 1-NN memorises the training set perfectly
        assert_array_equal(clf.predict(iris.data), iris.target)

        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        accuracy = np.mean(clf.predict(iris.data) == iris.target)
        assert_true(accuracy > 0.95)

        rgs = neighbors.KNeighborsRegressor(n_neighbors=5,
                                            algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        rounded = rgs.predict(iris.data).round()
        assert_true(np.mean(rounded == iris.target) > 0.95)
def test_neighbors_digits():
    """uint8 inputs must score the same as float inputs with 'brute'.

    The 'brute' algorithm has been observed to fail on uint8 input due to
    overflow in the distance calculations.
    """
    X = digits.data.astype('uint8')
    Y = digits.target
    n_samples = X.shape[0]
    split = int(n_samples * 0.8)
    train = np.arange(0, split)
    test = np.arange(split, n_samples)
    X_train, Y_train = X[train], Y[train]
    X_test, Y_test = X[test], Y[test]

    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # n_neighbors = 1: each sample's single neighbor is itself, so the
    # connectivity graph is the identity matrix.
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))

    # in distance mode the self-edge has weight 0, so the stored entry is
    # the distance to the single other neighbor per row
    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])

    # n_neighbors = 2
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])

    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])

    # n_neighbors = 3: with only 3 samples, every sample is connected to all
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
    assert_array_almost_equal(
        A.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    """Dense and CSR inputs must yield identical k-NN graphs."""
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)

    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            dense_graph = neighbors.kneighbors_graph(
                X, n_neighbors, mode=mode).toarray()
            sparse_graph = neighbors.kneighbors_graph(
                Xcsr, n_neighbors, mode=mode).toarray()
            assert_array_almost_equal(dense_graph, sparse_graph)
def test_radius_neighbors_graph():
    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # samples 0 and 2 are more than 1.5 apart, so only the middle sample
    # is connected to both
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 1.],
         [0., 1., 1.]])

    A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 0.],
         [1.01, 0., 1.40716026],
         [0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
    """Dense and CSR inputs must yield identical radius-neighbor graphs."""
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)

    # the loop value is passed as the `radius` argument of the builder
    for radius in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            dense_graph = neighbors.radius_neighbors_graph(
                X, radius, mode=mode).toarray()
            sparse_graph = neighbors.radius_neighbors_graph(
                Xcsr, radius, mode=mode).toarray()
            assert_array_almost_equal(dense_graph, sparse_graph)
def test_neighbors_badargs():
    # Test bad argument values: these should all raise ValueErrors
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')

    # NOTE: `rng` here is a module-level RandomState fixture, not a local.
    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)

    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        # invalid constructor arguments
        assert_raises(ValueError,
                      cls,
                      weights='blah')
        assert_raises(ValueError,
                      cls, p=-1)
        assert_raises(ValueError,
                      cls, algorithm='blah')
        # haversine + ball_tree: both predicting on this data and fitting
        # sparse input are expected to raise
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError,
                      nbrs.predict,
                      X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)
        # empty and wrongly-shaped (3-d) training data must raise
        nbrs = cls()
        assert_raises(ValueError,
                      nbrs.fit,
                      np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError,
                      nbrs.fit,
                      X[:, :, None], y)
        nbrs.fit(X, y)
        # empty query must raise
        assert_raises(ValueError,
                      nbrs.predict,
                      [])

    # invalid `mode` values for the graph builders
    nbrs = neighbors.NearestNeighbors().fit(X)

    assert_raises(ValueError,
                  nbrs.kneighbors_graph,
                  X, mode='blah')
    assert_raises(ValueError,
                  nbrs.radius_neighbors_graph,
                  X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    # Test computing the neighbors for various metrics
    # create a symmetric matrix (used as VI for the mahalanobis metric)
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)

    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)

    test = rng.rand(n_query_pts, n_features)

    for metric, metric_params in metrics:
        results = []
        # 'p' is passed as an explicit keyword, not inside metric_params
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue

            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results.append(neigh.kneighbors(test, return_distance=True))

        # the algorithms that ran must agree on distances and indices
        # (kd_tree may have been skipped above, leaving two results)
        assert_array_almost_equal(results[0][0], results[1][0])
        assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
    """A user-supplied callable metric works with 'auto' and 'brute'."""
    def custom_metric(x1, x2):
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))

    X = np.random.RandomState(42).rand(20, 2)
    nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
                                       metric=custom_metric)
    nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
                                       metric=custom_metric)
    for model in (nbrs1, nbrs2):
        model.fit(X)

    dist1, ind1 = nbrs1.kneighbors(X)
    dist2, ind2 = nbrs2.kneighbors(X)
    assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
    """Odd ways of passing metric parameters emit the expected warnings."""
    cases = [
        # metric params as **kwargs is deprecated
        (DeprecationWarning, dict(metric='wminkowski', w=np.ones(10))),
        # 'p' inside metric_params shadows the explicit keyword
        (SyntaxWarning, dict(metric_params={'p': 3})),
    ]
    for warning_cls, kwargs in cases:
        assert_warns(warning_cls, neighbors.KNeighborsClassifier, **kwargs)
def test_predict_sparse_ball_kd_tree():
    """ball_tree/kd_tree estimators must refuse sparse queries at predict."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    y = rng.randint(0, 2, 5)
    models = (neighbors.KNeighborsClassifier(1, algorithm='kd_tree'),
              neighbors.KNeighborsRegressor(1, algorithm='ball_tree'))
    for model in models:
        model.fit(X, y)
        assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    """Graph builders agree with NearestNeighbors for non-euclidean metrics."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # Find a reasonable radius.
    # Fixed: np.sort returns a sorted *copy*, so its result must be
    # assigned — the original discarded it, leaving `radius` an arbitrary
    # unsorted pairwise distance.
    dist_array = np.sort(pairwise_distances(X).flatten())
    radius = dist_array[15]

    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())

    # Test radius_neighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph,
                           nbrs1.radius_neighbors_graph(X).toarray())

    # A fitted NearestNeighbors passed with a conflicting metric must raise.
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Assert element-wise equality between an object array and a list."""
    for i in range(len(nparray)):
        assert_array_equal(nparray[i], list_check[i])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et.al when query is not training data
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]

        # Test neighbors: query [2] is at distance 1 of train point [1];
        # query [1] coincides with train point [1] (distance 0).
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])

        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])

        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    """kneighbors et al. with query=None query the training set itself,
    excluding each sample as its own neighbor."""
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)

        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])

        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
        # Fixed: the loop body previously asserted on `rng` in both
        # iterations, so `kng` was never actually checked.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])

        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    # Test behavior of kneighbors when duplicates are present in query
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])

        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        # zero distances must be stored as explicit entries in the sparse
        # matrix, not dropped
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])

        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])

        # Mask the first duplicates when n_duplicates > n_neighbors.
        X = np.ones((3, 1))
        nn = neighbors.NearestNeighbors(n_neighbors=1)
        nn.fit(X)
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, np.zeros((3, 1)))
        assert_array_equal(ind, [[1], [0], [1]])

        # Test that zeros are explicitly marked in kneighbors_graph.
        kng = nn.kneighbors_graph(mode='distance')
        assert_array_equal(
            kng.A, np.zeros((3, 3)))
        assert_array_equal(kng.data, np.zeros(3))
        assert_array_equal(kng.indices, [1., 0., 1.])
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    """`include_self` toggles the diagonal of the neighbor graphs."""
    X = [[2, 3], [4, 5]]

    with_self = neighbors.kneighbors_graph(X, 1, include_self=True).A
    without_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(with_self, [[1., 0.], [0., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])

    r_with_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=True).A
    r_without_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(r_with_self, [[1., 1.], [1., 1.]])
    assert_array_equal(r_without_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
    """String class labels must survive a fit/predict round-trip unchanged."""
    classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
    CLASSES = 15
    X = np.eye(CLASSES)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:CLASSES])

    result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
from .base_lutman import Base_LutMan, get_wf_idx_from_name
import numpy as np
from copy import copy
from qcodes.instrument.parameter import ManualParameter, InstrumentRefParameter
from qcodes.utils import validators as vals
from pycqed.measurement.waveform_control_CC import waveform as wf
from pycqed.measurement.waveform_control_CC import waveforms_flux as wfl
from pycqed.measurement.waveform_control_CC import waveforms_vcz as wf_vcz
import PyQt5
from qcodes.plots.pyqtgraph import QtPlot
import matplotlib.pyplot as plt
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
import time
import logging
log = logging.getLogger(__name__)
"""
The default schema of this LutMap allows for 4 different 2Q gates.
NW NE
\ /
Q
/ \
SW SE
First codeword is assigned to idling.
Codewords 2-5 are assigned to the two-qubit gates in clockwise order
(NE - SE - SW - NW)
Then we assign single qubit fluxing operations (parking and square)
Last codeword is reserved for custom waveforms defined by the user.
Args:
lutmap
Return:
valid (bool)
The schema for a lutmap is a dictionary with integer keys.
Every item in the dictionary must have the following keys:
"name" : str
"type" : one of valid_types
{'idle', 'cz', 'idle_z', 'square', 'custom'}
"which": str, optional used for two qubit flux pulses and one of
{"NE", "SE", "SW", "NW"}
"""
# Default flux lutmap: codeword 0 idles, codewords 1-4 hold the four
# directed two-qubit gates (clockwise NE-SE-SW-NW; NE/NW default to
# idle_z), 5-6 are single-qubit flux pulses (park, square) and 7 is
# reserved for a user-defined custom waveform.
_def_lm = {
    0: {"name": "i", "type": "idle"},
    1: {"name": "cz_NE", "type": "idle_z", "which": "NE"},
    2: {"name": "cz_SE", "type": "cz", "which": "SE"},
    3: {"name": "cz_SW", "type": "cz", "which": "SW"},
    4: {"name": "cz_NW", "type": "idle_z", "which": "NW"},
    5: {"name": "park", "type": "square"},
    6: {"name": "square", "type": "square"},
    7: {"name": "custom_wf", "type": "custom"},
}
class Base_Flux_LutMan(Base_LutMan):
    """
    The default scheme of this LutMap allows for 4 different 2Q gates.
    NW NE
      \ /
       Q
      / \
    SW SE
    """

    def render_wave(
        self,
        wave_name,
        time_units="s",
        reload_pulses: bool = True,
        render_distorted_wave: bool = True,
        QtPlot_win=None,
    ):
        """Render one waveform (and optionally its distorted version) in QtPlot.

        Args:
            wave_name: key into ``self._wave_dict``.
            time_units: "s" (x-axis in seconds) or "lut_index" (raw samples).
            reload_pulses: regenerate all standard waveforms before plotting.
            render_distorted_wave: also plot the matching entry of
                ``self._wave_dict_dist`` when present.
            QtPlot_win: existing QtPlot window to draw into; a new window is
                created when None.

        Returns:
            The QtPlot window containing the rendered curve(s).
        """
        if reload_pulses:
            self.generate_standard_waveforms()

        x = np.arange(len(self._wave_dict[wave_name]))
        y = self._wave_dict[wave_name]

        if time_units == "lut_index":
            xlab = ("Lookuptable index", "i")
        elif time_units == "s":
            x = x / self.sampling_rate()
            xlab = ("Time", "s")

        if QtPlot_win is None:
            QtPlot_win = QtPlot(window_title=wave_name, figsize=(600, 400))

        if render_distorted_wave:
            if wave_name in self._wave_dict_dist.keys():
                x2 = np.arange(len(self._wave_dict_dist[wave_name]))
                if time_units == "s":
                    x2 = x2 / self.sampling_rate()
                y2 = self._wave_dict_dist[wave_name]
                QtPlot_win.add(
                    x=x2,
                    y=y2,
                    name=wave_name + " distorted",
                    symbol="o",
                    symbolSize=5,
                    xlabel=xlab[0],
                    xunit=xlab[1],
                    ylabel="Amplitude",
                    yunit="dac val.",
                )
            else:
                log.warning("Wave not in distorted wave dict")

        # Plotting the normal one second ensures it is on top.
        QtPlot_win.add(
            x=x,
            y=y,
            name=wave_name,
            symbol="o",
            symbolSize=5,
            xlabel=xlab[0],
            xunit=xlab[1],
            ylabel="Amplitude",
            yunit="V",
        )
        return QtPlot_win
class HDAWG_Flux_LutMan(Base_Flux_LutMan):
    def __init__(self, name, **kw):
        """Create the HDAWG flux lutman and register its parameters."""
        super().__init__(name, **kw)
        self._wave_dict_dist = dict()  # holds distortion-corrected waveforms
        self.sampling_rate(2.4e9)  # HDAWG sampling rate in Sa/s
        self._add_qubit_parameters()
        self._add_cz_sim_parameters()
    def set_default_lutmap(self):
        """Set the default lutmap for standard flux waveforms.

        NOTE(review): the original docstring said "microwave drive pulses",
        but `_def_lm` contains flux waveforms (idle/cz/park/square/custom).
        """
        self.LutMap(_def_lm.copy())
def generate_standard_waveforms(self):
"""
Generate all the standard waveforms and populates self._wave_dict
"""
self._wave_dict = {}
# N.B. the naming convention ._gen_{waveform_name} must be preserved
# as it is used in the load_waveform_onto_AWG_lookuptable method.
self._wave_dict["i"] = self._gen_i()
self._wave_dict["square"] = self._gen_square()
self._wave_dict["park"] = self._gen_park()
self._wave_dict["custom_wf"] = self._gen_custom_wf()
for _, waveform in self.LutMap().items():
wave_name = waveform["name"]
if waveform["type"] == "cz" or waveform["type"] == "idle_z":
which_gate = waveform["which"]
if waveform["type"] == "cz":
self._wave_dict[wave_name] = self._gen_cz(which_gate=which_gate)
elif waveform["type"] == "idle_z":
# The vcz pulse itself has all parameters necessary for the correction
self._wave_dict[wave_name] = self._gen_cz(which_gate=which_gate)
def _gen_i(self):
return np.zeros(int(self.idle_pulse_length() * self.sampling_rate()))
def _gen_square(self):
return wf.single_channel_block(
amp=self.sq_amp(),
length=self.sq_length(),
sampling_rate=self.sampling_rate(),
delay=self.sq_delay(),
)
def _gen_park(self):
if self.park_double_sided():
ones = np.ones(int(self.park_length() * self.sampling_rate() / 2))
pulse_pos = self.park_amp() * ones
return np.concatenate((pulse_pos, - pulse_pos))
else:
return self.park_amp() * np.ones(
int(self.park_length() * self.sampling_rate())
)
    def _add_qubit_parameters(self):
        """
        Adds parameters responsible for keeping track of qubit frequencies,
        coupling strengths etc.
        """
        self.add_parameter(
            "q_polycoeffs_freq_01_det",
            docstring="Coefficients of the polynomial used to convert "
            "amplitude in V to detuning in Hz. \nN.B. it is important to "
            "include both the AWG range and channel amplitude in the params.\n"
            "N.B.2 Sign convention: positive detuning means frequency is "
            "higher than current frequency, negative detuning means its "
            "smaller.\n"
            "In order to convert a set of cryoscope flux arc coefficients to "
            " units of Volts they can be rescaled using [c0*sc**2, c1*sc, c2]"
            " where sc is the desired scaling factor that includes the sq_amp "
            "used and the range of the AWG (5 in amp mode).",
            vals=vals.Arrays(),
            # initial value is chosen to not raise errors
            initial_value=np.array([-2e9, 0, 0]),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "q_polycoeffs_anharm",
            docstring="coefficients of the polynomial used to calculate "
            "the anharmonicity (Hz) as a function of amplitude in V. "
            "N.B. it is important to "
            "include both the AWG range and channel amplitude in the params.\n",
            vals=vals.Arrays(),
            # initial value sets a flux independent anharmonicity of 300MHz
            initial_value=np.array([0, 0, -300e6]),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "q_freq_01",
            vals=vals.Numbers(),
            docstring="Current operating frequency of qubit",
            # initial value is chosen to not raise errors
            initial_value=6e9,
            unit="Hz",
            parameter_class=ManualParameter,
        )

        # one frequency/coupling pair per possible CZ direction
        for this_cz in ["NE", "NW", "SW", "SE"]:
            self.add_parameter(
                "q_freq_10_%s" % this_cz,
                vals=vals.Numbers(),
                docstring="Current operating frequency of qubit"
                " with which a CZ gate can be performed.",
                # initial value is chosen to not raise errors
                initial_value=6e9,
                unit="Hz",
                parameter_class=ManualParameter,
            )
            self.add_parameter(
                "q_J2_%s" % this_cz,
                vals=vals.Numbers(1e3, 500e6),
                unit="Hz",
                docstring="effective coupling between the 11 and 02 states.",
                # initial value is chosen to not raise errors
                initial_value=15e6,
                parameter_class=ManualParameter,
            )
def _add_waveform_parameters(self):
# CODEWORD 1: Idling
self.add_parameter(
"idle_pulse_length",
unit="s",
label="Idling pulse length",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
# CODEWORDS 1-4: CZ
# [2020-06-23] This dictionary is added here to be extended if a new or
# different flux waveform for cz is to be tested
# The cz waveform generators receive the `fluxlutman` and `which_gate`
# as arguments
self._cz_wf_generators_dict = {
"vcz_waveform": wf_vcz.vcz_waveform
}
for this_cz in ["NE", "NW", "SW", "SE"]:
self.add_parameter(
"cz_wf_generator_%s" % this_cz,
initial_value="vcz_dev_waveform",
vals=vals.Strings(),
parameter_class=ManualParameter,
)
wf_vcz.add_vcz_parameters(self, which_gate=this_cz)
# CODEWORD 5: Parking
self.add_parameter(
"park_length",
unit="s",
label="Parking pulse duration",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"park_amp",
initial_value=0,
label="Parking pulse amp. pos.",
docstring="Parking pulse amplitude if `park_double_sided` is `False`, "
"or positive amplitude for Net-Zero",
unit="dac value",
vals=vals.Numbers(),
parameter_class=ManualParameter,
)
self.add_parameter(
"park_double_sided",
initial_value=False,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
# CODEWORD 6: SQUARE
self.add_parameter(
"sq_amp",
initial_value=0.5,
# units is part of the total range of AWG8
label="Square pulse amplitude",
unit="dac value",
vals=vals.Numbers(),
parameter_class=ManualParameter,
)
self.add_parameter(
"sq_length",
unit="s",
label="Square pulse duration",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"sq_delay",
unit="s",
label="Square pulse delay",
initial_value=0e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
# CODEWORD 7: CUSTOM
self.add_parameter(
"custom_wf",
initial_value=np.array([]),
label="Custom waveform",
docstring=(
"Specifies a custom waveform, note that "
"`custom_wf_length` is used to cut of the waveform if"
"it is set."
),
parameter_class=ManualParameter,
vals=vals.Arrays(),
)
self.add_parameter(
"custom_wf_length",
unit="s",
label="Custom waveform length",
initial_value=np.inf,
docstring=(
"Used to determine at what sample the custom waveform "
"is forced to zero. This is used to facilitate easy "
"cryoscope measurements of custom waveforms."
),
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=0),
)
def _gen_cz(self, which_gate, regenerate_cz=True):
gate_str = "cz_%s" % which_gate
wf_generator_name = self.get("cz_wf_generator_{}".format(which_gate))
wf_generator = self._cz_wf_generators_dict[wf_generator_name]
if regenerate_cz:
self._wave_dict[gate_str] = wf_generator(self, which_gate=which_gate)
cz_pulse = self._wave_dict[gate_str]
return cz_pulse
def calc_amp_to_eps(
self,
amp: float,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
):
"""
Calculates detuning between two levels as a function of pulse
amplitude in Volt.
ε(V) = f_B (V) - f_A (V)
Args:
amp (float) : amplitude in Volt
state_A (str) : string of 2 numbers denoting the state. The numbers
correspond to the number of excitations in each qubits.
The LSQ (right) corresponds to the qubit being fluxed and
under control of this flux lutman.
state_B (str) :
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
polycoeffs_A = self.get_polycoeffs_state(state=state_A, which_gate=which_gate)
polycoeffs_B = self.get_polycoeffs_state(state=state_B, which_gate=which_gate)
polycoeffs = polycoeffs_B - polycoeffs_A
return np.polyval(polycoeffs, amp)
def calc_eps_to_dac(
self,
eps,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
positive_branch=True,
):
"""
See `calc_eps_to_amp`
"""
return (
self.calc_eps_to_amp(eps, state_A, state_B, which_gate, positive_branch)
* self.get_amp_to_dac_val_scalefactor()
)
def calc_eps_to_amp(
self,
eps,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
positive_branch=True,
):
"""
Calculates amplitude in Volt corresponding to an energy difference
between two states in Hz.
V(ε) = V(f_b - f_a)
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
# recursive allows dealing with an array of freqs
if isinstance(eps, (list, np.ndarray)):
return np.array(
[
self.calc_eps_to_amp(
eps=e,
state_A=state_A,
state_B=state_B,
which_gate=which_gate,
positive_branch=positive_branch,
)
for e in eps
]
)
polycoeffs_A = self.get_polycoeffs_state(state=state_A, which_gate=which_gate)
if state_B is not None:
polycoeffs_B = self.get_polycoeffs_state(
state=state_B, which_gate=which_gate
)
polycoeffs = polycoeffs_B - polycoeffs_A
else:
polycoeffs = copy(polycoeffs_A)
polycoeffs[-1] = 0
p = np.poly1d(polycoeffs)
sols = (p - eps).roots
# sols returns 2 solutions (for a 2nd order polynomial)
if positive_branch:
sol = np.max(sols)
else:
sol = np.min(sols)
# imaginary part is ignored, instead sticking to closest real value
# float is because of a typecasting bug in np 1.12 (solved in 1.14)
return float(np.real(sol))
def calc_net_zero_length_ratio(self, which_gate: str = "NE"):
    """
    Determine the length ratio of the net-zero pulses based on the
    parameter "czd_length_ratio".

    If czd_length_ratio is set to "auto", the positive- and
    negative-branch interaction amplitudes are used to determine the
    scaling of lengths. Note that this is a coarse approximation.
    """
    configured_ratio = self.get("czd_length_ratio_%s" % which_gate)
    if configured_ratio != "auto":
        return configured_ratio

    amp_pos = self.calc_eps_to_amp(
        0,
        state_A="11",
        state_B="02",
        which_gate=which_gate,
        positive_branch=True,
    )
    amp_neg = self.calc_eps_to_amp(
        0,
        state_A="11",
        state_B="02",
        which_gate=which_gate,
        positive_branch=False,
    )
    # Chosen such that (amp_pos * lr + amp_neg * (1 - lr)) == 0.
    return -amp_neg / (amp_pos - amp_neg)
def get_polycoeffs_state(self, state: str, which_gate: str = "NE"):
    """Return the polynomial coefficients for the energy of `state`.

    Args:
        state (str) : string of 2 numbers denoting the state. The numbers
            correspond to the number of excitations in each qubit.
            The LSQ (right) corresponds to the qubit being fluxed and
            under control of this flux lutman.
        which_gate (str): selects the q_freq_10 parameter of the other
            (MSQ) qubit; it does not affect the fluxed qubit itself.

    Note that avoided crossings are not taken into account here.
    """
    # Depending on the interaction (North or South) this qubit fluxes or not;
    # depending on whether it fluxes, it is LSQ or MSQ.
    coeffs = np.zeros(3)
    freq_10 = self.get("q_freq_10_%s" % which_gate)
    if state == "00":
        return coeffs
    if state == "01":
        coeffs += self.q_polycoeffs_freq_01_det()
        coeffs[2] += self.q_freq_01()
    elif state == "02":
        coeffs += 2 * self.q_polycoeffs_freq_01_det()
        coeffs += self.q_polycoeffs_anharm()
        coeffs[2] += 2 * self.q_freq_01()
    elif state == "10":
        coeffs[2] += freq_10
    elif state == "11":
        coeffs += self.q_polycoeffs_freq_01_det()
        coeffs[2] += self.q_freq_01() + freq_10
    else:
        raise ValueError("State {} not recognized".format(state))
    return coeffs
def _get_awg_channel_amplitude(self):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1
awg_nr = awg_ch // 2
ch_pair = awg_ch % 2
channel_amp = AWG.get("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair))
return channel_amp
def _set_awg_channel_amplitude(self, val):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1
awg_nr = awg_ch // 2
ch_pair = awg_ch % 2
AWG.set("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair), val)
def _get_awg_channel_range(self):
    """Read the peak-to-peak output range (V) of this lutman's AWG channel.

    Polls the instrument up to 5 times, 0.5 s apart, because the node can
    transiently return None.
    NOTE(review): if all 5 attempts return None this silently returns
    None -- confirm that callers tolerate that.
    """
    AWG = self.AWG.get_instr()
    awg_ch = self.cfg_awg_channel() - 1  # -1 is to account for starting at 1
    # channel range of 5 corresponds to -2.5V to +2.5V
    for i in range(5):
        channel_range_pp = AWG.get("sigouts_{}_range".format(awg_ch))
        if channel_range_pp is not None:
            break
        time.sleep(0.5)
    return channel_range_pp
def _get_wf_name_from_cw(self, codeword: int):
    """Return the waveform name assigned to `codeword` in the LutMap."""
    for cw_key, wf_spec in self.LutMap().items():
        if int(cw_key) == codeword:
            return wf_spec["name"]
    raise ValueError("Codeword {} not specified" " in LutMap".format(codeword))
def _get_cw_from_wf_name(self, wf_name: str):
    """Return the codeword (int) under which `wf_name` appears in the LutMap."""
    for cw_key, wf_spec in self.LutMap().items():
        if wf_spec["name"] == wf_name:
            return int(cw_key)
    raise ValueError("Waveform {} not specified" " in LutMap".format(wf_name))
def _gen_custom_wf(self):
    """Return a copy of the user-supplied waveform, truncated in time.

    Samples at and after `custom_wf_length` (converted to a sample index
    using the sampling rate) are forced to zero; an infinite length leaves
    the waveform untouched. This facilitates easy cryoscope measurements
    of custom waveforms.
    """
    waveform = copy(self.custom_wf())
    if self.custom_wf_length() != np.inf:
        # Zero everything from the cutoff sample onward.
        cutoff_sample = int(self.custom_wf_length() * self.sampling_rate())
        waveform[cutoff_sample:] = 0
    return waveform
def calc_freq_to_amp(
    self,
    freq: float,
    state: str = "01",
    which_gate: str = "NE",
    positive_branch=True,
):
    """
    Calculates the amplitude in Volt corresponding to the energy of a
    state in Hz, by treating the frequency as a detuning from "00" and
    delegating to `calc_eps_to_amp`.

    N.B. this method assumes that the polycoeffs are with respect to the
    amplitude in units of V, including rescaling due to the channel
    amplitude and range settings of the AWG8.
    See also `self.get_dac_val_to_amp_scalefactor`.

        amp_Volts = amp_dac_val * channel_amp * channel_range
    """
    return self.calc_eps_to_amp(
        eps=freq,
        state_A="00",
        state_B=state,
        which_gate=which_gate,
        positive_branch=positive_branch,
    )
def _add_cfg_parameters(self):
    """Register the cfg_* configuration parameters of this flux lutman.

    Covers channel selection, distortion/compensation switches and delays,
    instrument references, and hardware-backed access (via get/set
    commands) to the AWG channel amplitude and range.
    """
    self.add_parameter(
        "cfg_awg_channel",
        initial_value=1,
        vals=vals.Ints(1, 8),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "cfg_distort",
        initial_value=True,
        vals=vals.Bool(),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "cfg_append_compensation",
        docstring=(
            "If True compensation pulses will be added to individual "
            " waveforms creating very long waveforms for each codeword"
        ),
        initial_value=True,
        vals=vals.Bool(),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "cfg_compensation_delay",
        initial_value=3e-6,
        unit="s",
        vals=vals.Numbers(),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "cfg_pre_pulse_delay",
        unit="s",
        label="Pre pulse delay",
        docstring="This parameter is used for fine timing corrections, the"
        " correction is applied in distort_waveform.",
        initial_value=0e-9,
        vals=vals.Numbers(0, 1e-6),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "instr_distortion_kernel", parameter_class=InstrumentRefParameter
    )
    self.add_parameter(
        "instr_partner_lutman",  # FIXME: unused?
        docstring="LutMan responsible for the corresponding"
        "channel in the AWG8 channel pair. "
        "Reference is used when uploading waveforms",
        parameter_class=InstrumentRefParameter,
    )
    self.add_parameter(
        "_awgs_fl_sequencer_program_expected_hash",  # FIXME: un used?
        docstring="crc32 hash of the awg8 sequencer program. "
        "This parameter is used to dynamically determine "
        "if the program needs to be uploaded. The initial_value is"
        " None, indicating that the program needs to be uploaded."
        " After the first program is uploaded, the value is set.",
        initial_value=None,
        vals=vals.Ints(),
        parameter_class=ManualParameter,
    )
    self.add_parameter(
        "cfg_max_wf_length",
        parameter_class=ManualParameter,
        initial_value=10e-6,
        unit="s",
        vals=vals.Numbers(0, 100e-6),
    )
    # The two parameters below proxy live hardware state through
    # get/set commands instead of storing a value locally.
    self.add_parameter(
        "cfg_awg_channel_range",
        docstring="peak peak value, channel range of 5 corresponds to -2.5V to +2.5V",
        get_cmd=self._get_awg_channel_range,
        unit="V_pp",
    )
    self.add_parameter(
        "cfg_awg_channel_amplitude",
        docstring="digital scale factor between 0 and 1",
        get_cmd=self._get_awg_channel_amplitude,
        set_cmd=self._set_awg_channel_amplitude,
        unit="a.u.",
        vals=vals.Numbers(0, 1),
    )
def get_dac_val_to_amp_scalefactor(self):
    """Return the factor converting a 'dac value' to an amplitude in V.

    "dac_value" refers to the value between -1 and +1 that is set in a
    waveform.

    N.B. the implementation is specific to this type of AWG.
    """
    if self.AWG() is None:
        log.warning("No AWG present, returning unity scale factor.")
        return 1
    digital_amp = self.cfg_awg_channel_amplitude()
    range_pp = self.cfg_awg_channel_range()
    # A channel range of 5 corresponds to -2.5 V .. +2.5 V, hence / 2.
    return digital_amp * (range_pp / 2)
def get_amp_to_dac_val_scalefactor(self):
    """Return the scale factor from Volt to 'dac value'.

    This is the inverse of `get_dac_val_to_amp_scalefactor`; when the
    forward factor is 0 a warning is emitted and 1 is returned so that
    callers do not break on an unconfigured channel.
    """
    if self.get_dac_val_to_amp_scalefactor() != 0:
        return 1 / self.get_dac_val_to_amp_scalefactor()
    # Warn rather than raise: things should not break because of this.
    log.warning(
        'AWG amp to dac scale factor is 0, check "{}" '
        "output amplitudes".format(self.AWG())
    )
    return 1
def calc_amp_to_freq(self, amp: float, state: str = "01", which_gate: str = "NE"):
    """
    Converts pulse amplitude in Volt to energy in Hz for a particular state.

    Args:
        amp (float) : amplitude in Volt
        state (str) : string of 2 numbers denoting the state. The numbers
            correspond to the number of excitations in each qubit.
            The LSQ (right) corresponds to the qubit being fluxed and
            under control of this flux lutman.

    N.B. this method assumes that the polycoeffs are with respect to the
    amplitude in units of V, including rescaling due to the channel
    amplitude and range settings of the AWG8.
    See also `self.get_dac_val_to_amp_scalefactor`.

    N.B. The value of which_gate (and its default) only affects the
    other qubit frequencies (here noted as MSQ 10).

        amp_Volts = amp_dac_val * channel_amp * channel_range
    """
    state_poly = np.poly1d(
        self.get_polycoeffs_state(state=state, which_gate=which_gate)
    )
    return state_poly(amp)
#################################
# Waveform loading methods #
#################################
def load_waveform_onto_AWG_lookuptable(
    self, wave_id: str, regenerate_waveforms: bool = False
):
    """
    Loads a specific waveform to the AWG.

    Args:
        wave_id: either a waveform name (str) or a codeword index;
            duck typing below resolves the other representation.
        regenerate_waveforms: if True regenerate only this waveform
            before uploading.
    """
    # Here we are ductyping to determine if the waveform name or the
    # codeword was specified.
    if type(wave_id) == str:
        waveform_name = wave_id
        codeword = get_wf_idx_from_name(wave_id, self.LutMap())
    else:
        waveform_name = self.LutMap()[wave_id]["name"]
        codeword = wave_id

    if regenerate_waveforms:
        # only regenerate the one waveform that is desired
        if "cz" in waveform_name:
            # CZ gates contain information on which pair (NE, SE, SW, NW)
            # the gate is performed with; this is specified in which_gate,
            # encoded in the name suffix after "cz_".
            gen_wf_func = getattr(self, "_gen_cz")
            self._wave_dict[waveform_name] = gen_wf_func(
                which_gate=waveform_name[3:]
            )
        else:
            # generic waveforms dispatch to the matching _gen_<name> method
            gen_wf_func = getattr(self, "_gen_{}".format(waveform_name))
            self._wave_dict[waveform_name] = gen_wf_func()

    waveform = self._wave_dict[waveform_name]
    codeword_str = "wave_ch{}_cw{:03}".format(self.cfg_awg_channel(), codeword)

    if self.cfg_append_compensation():
        waveform = self.add_compensation_pulses(waveform)

    if self.cfg_distort():
        # This is where the fixed length waveform is
        # set to cfg_max_wf_length
        waveform = self.distort_waveform(waveform)
        self._wave_dict_dist[waveform_name] = waveform
    else:
        # This is where the fixed length waveform is
        # set to cfg_max_wf_length
        waveform = self._append_zero_samples(waveform)
        self._wave_dict_dist[waveform_name] = waveform

    self.AWG.get_instr().set(codeword_str, waveform)
def load_waveforms_onto_AWG_lookuptable(
    self, regenerate_waveforms: bool = True, stop_start: bool = True
):
    """
    Loads all waveforms specified in the LutMap to an AWG for both this
    LutMap and the partner LutMap.

    Args:
        regenerate_waveforms (bool): if True calls
            generate_standard_waveforms before uploading.
        stop_start (bool): if True stops and starts the AWG.
    """
    AWG = self.AWG.get_instr()

    if stop_start:
        AWG.stop()

    for idx, waveform in self.LutMap().items():
        self.load_waveform_onto_AWG_lookuptable(
            wave_id=idx, regenerate_waveforms=regenerate_waveforms
        )

    # NOTE(review): these bare getter calls appear to only refresh the
    # hardware-backed parameter values -- confirm they are still needed.
    self.cfg_awg_channel_amplitude()
    self.cfg_awg_channel_range()

    if stop_start:
        AWG.start()
def _append_zero_samples(self, waveform):
    """Zero-pad (or clip) `waveform` to the fixed upload length.

    The target length is `cfg_max_wf_length` converted to samples and
    rounded up by `roundup1024` to the AWG waveform-size granularity.
    """
    target_len = roundup1024(
        int(self.sampling_rate() * self.cfg_max_wf_length())
    )
    pad = target_len - len(waveform)
    if pad < 0:
        # Waveform exceeds the fixed length: clip the excess tail.
        return waveform[:pad]
    return np.concatenate([waveform, np.zeros(pad)])
def add_compensation_pulses(self, waveform):
    """Append a delay followed by the sign-inverted pulse.

    The inverted copy at the end makes the integrated flux zero, which
    ensures the flux line discharges.
    """
    pulse = np.array(waveform)  # catches a rare bug when wf is a list
    delay = np.zeros(
        int(self.sampling_rate() * self.cfg_compensation_delay())
    )
    return np.concatenate([pulse, delay, -1 * pulse])
def distort_waveform(self, waveform, inverse=False):
    """
    Modifies the ideal waveform to correct for distortions and correct
    fine delays.

    Distortions are corrected using the kernel object referenced by
    `instr_distortion_kernel`.

    Args:
        waveform: 1D array of ideal waveform samples.
        inverse: if True apply the inverse correction (only supported by
            kernel objects that expose `distort_waveform`).
    """
    k = self.instr_distortion_kernel.get_instr()

    # Prepend zeros to delay waveform to correct for fine timing
    delay_samples = int(self.cfg_pre_pulse_delay() * self.sampling_rate())
    waveform = np.pad(waveform, (delay_samples, 0), "constant")

    # duck typing the distort waveform method
    if hasattr(k, "distort_waveform"):
        distorted_waveform = k.distort_waveform(
            waveform,
            length_samples=int(
                roundup1024(self.cfg_max_wf_length() * self.sampling_rate())
            ),
            inverse=inverse,
        )
    else:  # old kernel object does not have this method
        if inverse:
            raise NotImplementedError()
        distorted_waveform = k.convolve_kernel(
            [k.kernel(), waveform],
            length_samples=int(self.cfg_max_wf_length() * self.sampling_rate()),
        )
    return distorted_waveform
#################################
# Plotting methods #
#################################
def plot_cz_trajectory(self, axs=None, show=True, which_gate="NE"):
    """
    Plots the cz trajectory in frequency space.

    Three stacked panels: the mixing angle theta, the 11-02 detuning
    epsilon, and the raw amplitude (ideal plus distortion-predicted).

    Args:
        axs: list of three matplotlib axes; created when None.
        show: if True call plt.show() before returning.
        which_gate: gate direction selecting the cz waveform and q_ params.

    Returns:
        The list of axes drawn on.
    """
    q_J2 = self.get("q_J2_%s" % which_gate)
    if axs is None:
        f, axs = plt.subplots(figsize=(5, 7), nrows=3, sharex=True)
    dac_amps = self._wave_dict["cz_%s" % which_gate]
    t = np.arange(0, len(dac_amps)) * 1 / self.sampling_rate()

    CZ_amp = dac_amps * self.get_dac_val_to_amp_scalefactor()
    CZ_eps = self.calc_amp_to_eps(CZ_amp, "11", "02", which_gate=which_gate)
    CZ_theta = wfl.eps_to_theta(CZ_eps, q_J2)

    axs[0].plot(t, np.rad2deg(CZ_theta), marker=".")
    axs[0].fill_between(t, np.rad2deg(CZ_theta), color="C0", alpha=0.5)
    set_ylabel(axs[0], r"$\theta$", "deg")

    axs[1].plot(t, CZ_eps, marker=".")
    axs[1].fill_between(t, CZ_eps, color="C0", alpha=0.5)
    set_ylabel(axs[1], r"$\epsilon_{11-02}$", "Hz")

    axs[2].plot(t, CZ_amp, marker=".")
    axs[2].fill_between(t, CZ_amp, color="C0", alpha=0.1)
    set_xlabel(axs[2], "Time", "s")
    set_ylabel(axs[2], r"Amp.", "V")
    # axs[2].set_ylim(-1, 1)
    axs[2].axhline(0, lw=0.2, color="grey")
    # overlay the distortion-corrected prediction, clipped to pulse length
    CZ_amp_pred = self.distort_waveform(CZ_amp)[: len(CZ_amp)]
    axs[2].plot(t, CZ_amp_pred, marker=".")
    axs[2].fill_between(t, CZ_amp_pred, color="C1", alpha=0.3)
    if show:
        plt.show()
    return axs
def plot_level_diagram(self, ax=None, show=True, which_gate="NE"):
    """
    Plots the level diagram as specified by the q_ parameters.
        1. Plotting levels
        2. Annotating feature of interest
        3. Adding legend etc.
        4. Add a twin x-axis to denote scale in dac amplitude

    Args:
        ax: matplotlib axis to draw on; a new figure is created when None.
        show: if True call plt.show() before returning.
        which_gate: gate direction ("NE", "NW", "SW", "SE") selecting
            the relevant q_ parameters.

    Returns:
        The matplotlib axis containing the diagram.
    """
    if ax is None:
        f, ax = plt.subplots()
    # 1. Plotting levels
    # maximum voltage of AWG in amp mode
    amps = np.linspace(-2.5, 2.5, 101)
    freqs = self.calc_amp_to_freq(amps, state="01", which_gate=which_gate)
    ax.plot(amps, freqs, label="$f_{01}$")
    ax.text(
        0,
        self.calc_amp_to_freq(0, state="01", which_gate=which_gate),
        "01",
        color="C0",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    freqs = self.calc_amp_to_freq(amps, state="02", which_gate=which_gate)
    ax.plot(amps, freqs, label="$f_{02}$")
    ax.text(
        0,
        self.calc_amp_to_freq(0, state="02", which_gate=which_gate),
        "02",
        color="C1",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    freqs = self.calc_amp_to_freq(amps, state="10", which_gate=which_gate)
    ax.plot(amps, freqs, label="$f_{10}$")
    ax.text(
        0,
        self.calc_amp_to_freq(0, state="10", which_gate=which_gate),
        "10",
        color="C2",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    freqs = self.calc_amp_to_freq(amps, state="11", which_gate=which_gate)
    ax.plot(amps, freqs, label="$f_{11}$")
    ax.text(
        0,
        self.calc_amp_to_freq(0, state="11", which_gate=which_gate),
        "11",
        color="C3",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    # 2. Annotating feature of interest
    ax.axvline(0, 0, 1e10, linestyle="dotted", c="grey")
    # amplitudes at which the 11-02 and 10-01 detunings cross zero
    amp_J2 = self.calc_eps_to_amp(
        0, state_A="11", state_B="02", which_gate=which_gate
    )
    amp_J1 = self.calc_eps_to_amp(
        0, state_A="10", state_B="01", which_gate=which_gate
    )
    ax.axvline(amp_J2, ls="--", lw=1, c="C4")
    ax.axvline(amp_J1, ls="--", lw=1, c="C6")
    f_11_02 = self.calc_amp_to_freq(amp_J2, state="11", which_gate=which_gate)
    ax.plot([amp_J2], [f_11_02], color="C4", marker="o", label="11-02")
    ax.text(
        amp_J2,
        f_11_02,
        "({:.4f},{:.2f})".format(amp_J2, f_11_02 * 1e-9),
        color="C4",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    f_10_01 = self.calc_amp_to_freq(amp_J1, state="01", which_gate=which_gate)
    ax.plot([amp_J1], [f_10_01], color="C5", marker="o", label="10-01")
    ax.text(
        amp_J1,
        f_10_01,
        "({:.4f},{:.2f})".format(amp_J1, f_10_01 * 1e-9),
        color="C5",
        ha="left",
        va="bottom",
        clip_on=True,
    )
    # 3. Adding legend etc.
    title = "Calibration visualization\n{}\nchannel {}".format(
        self.AWG(), self.cfg_awg_channel()
    )
    leg = ax.legend(title=title, loc=(1.05, 0.3))
    leg._legend_box.align = "center"
    set_xlabel(ax, "AWG amplitude", "V")
    set_ylabel(ax, "Frequency", "Hz")
    ax.set_xlim(-2.5, 2.5)
    ax.set_ylim(
        0, self.calc_amp_to_freq(0, state="02", which_gate=which_gate) * 1.1
    )
    # 4. Add a twin x-axis to denote scale in dac amplitude
    dac_val_axis = ax.twiny()
    dac_ax_lims = np.array(ax.get_xlim()) * self.get_amp_to_dac_val_scalefactor()
    dac_val_axis.set_xlim(dac_ax_lims)
    set_xlabel(dac_val_axis, "AWG amplitude", "dac")
    # grey out the region outside the valid dac range [-1, 1]
    dac_val_axis.axvspan(1, 1000, facecolor=".5", alpha=0.5)
    dac_val_axis.axvspan(-1000, -1, facecolor=".5", alpha=0.5)
    # get figure is here in case an axis object was passed as input
    f = ax.get_figure()
    f.subplots_adjust(right=0.7)
    if show:
        plt.show()
    return ax
def plot_cz_waveforms(
    self, qubits: list, which_gate_list: list, ax=None, show: bool = True
):
    """
    Plots the cz waveforms from several flux lutmans, mainly for
    verification, time alignment and debugging.

    Args:
        qubits: qubit names; each is resolved to the instrument named
            "flux_lm_<qubit>".
        which_gate_list: gate direction per qubit, parallel to `qubits`.
        ax: matplotlib axis to draw on; created when None.
        show: if True call fig.show() before returning.

    Returns:
        The matplotlib figure containing the plot.
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)

    flux_lm_list = [
        self.find_instrument("flux_lm_{}".format(qubit)) for qubit in qubits
    ]

    for flux_lm, which_gate, qubit in zip(flux_lm_list, which_gate_list, qubits):
        flux_lm.generate_standard_waveforms()
        waveform_name = "cz_{}".format(which_gate)
        ax.plot(
            flux_lm._wave_dict[waveform_name],
            ".-",
            label=waveform_name + " " + qubit,
        )
    ax.legend()
    fig = ax.get_figure()
    if show:
        fig.show()
    return fig
#################################
# Simulation methods #
#################################
def _add_cz_sim_parameters(self):
    """Register parameters used by CZ-gate simulations.

    Adds, per gate direction (NE/NW/SW/SE), a bus frequency and a
    reference to the simulation-control instrument, plus a single
    step-response array shared by all directions.
    """
    for this_cz in ["NE", "NW", "SW", "SE"]:
        self.add_parameter(
            "bus_freq_%s" % this_cz,
            docstring="[CZ simulation] Bus frequency.",
            vals=vals.Numbers(0.1e9, 1000e9),
            initial_value=7.77e9,
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "instr_sim_control_CZ_%s" % this_cz,
            docstring="Noise and other parameters for CZ simulation.",
            parameter_class=InstrumentRefParameter,
        )

    self.add_parameter(
        "step_response",
        initial_value=np.array([]),
        label="Step response",
        docstring=(
            "Stores the normalized flux line step response. "
            "Intended for use in cz simulations with noise."
        ),
        parameter_class=ManualParameter,
        vals=vals.Arrays(),
    )
class QWG_Flux_LutMan(HDAWG_Flux_LutMan):
    """Flux lutman for the QuTech QWG.

    Inherits waveform generation from HDAWG_Flux_LutMan but overrides the
    channel-amplitude access and the configuration parameters for the
    simpler QWG hardware (4 channels, amplitude addressed directly in V).
    """

    def __init__(self, name, **kw):
        super().__init__(name, **kw)
        self._wave_dict_dist = dict()
        self.sampling_rate(1e9)

    def get_dac_val_to_amp_scalefactor(self):
        """
        Returns the scale factor to transform an amplitude in 'dac value' to an
        amplitude in 'V'.

        N.B. the implementation is specific to this type of AWG (QWG)
        """
        AWG = self.AWG.get_instr()
        awg_ch = self.cfg_awg_channel()

        channel_amp = AWG.get("ch{}_amp".format(awg_ch))
        scale_factor = channel_amp
        return scale_factor

    def load_waveforms_onto_AWG_lookuptable(
        self, regenerate_waveforms: bool = True, stop_start: bool = True
    ):
        """Upload all LutMap waveforms using the base-class implementation."""
        # We inherit from the HDAWG LutMan but do not require the fancy
        # loading because the QWG is a simple device!
        return Base_Flux_LutMan.load_waveforms_onto_AWG_lookuptable(
            self, regenerate_waveforms=regenerate_waveforms, stop_start=stop_start
        )

    def _get_awg_channel_amplitude(self):
        # QWG amplitude is addressed per channel, directly in Volt.
        AWG = self.AWG.get_instr()
        awg_ch = self.cfg_awg_channel()

        channel_amp = AWG.get("ch{}_amp".format(awg_ch))
        return channel_amp

    def _set_awg_channel_amplitude(self, val):
        AWG = self.AWG.get_instr()
        awg_ch = self.cfg_awg_channel()

        channel_amp = AWG.set("ch{}_amp".format(awg_ch), val)
        return channel_amp

    def _add_cfg_parameters(self):
        """Register QWG-specific cfg_* parameters (4 channels, amp in V)."""
        self.add_parameter(
            "cfg_awg_channel",
            initial_value=1,
            vals=vals.Ints(1, 4),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "cfg_distort",
            initial_value=True,
            vals=vals.Bool(),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "cfg_append_compensation",
            docstring=(
                "If True compensation pulses will be added to individual "
                " waveforms creating very long waveforms for each codeword"
            ),
            initial_value=True,
            vals=vals.Bool(),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "cfg_compensation_delay",
            parameter_class=ManualParameter,
            initial_value=3e-6,
            unit="s",
            vals=vals.Numbers(),
        )
        self.add_parameter(
            "cfg_pre_pulse_delay",
            unit="s",
            label="Pre pulse delay",
            docstring="This parameter is used for fine timing corrections, the"
            " correction is applied in distort_waveform.",
            initial_value=0e-9,
            vals=vals.Numbers(0, 1e-6),
            parameter_class=ManualParameter,
        )
        self.add_parameter(
            "instr_distortion_kernel", parameter_class=InstrumentRefParameter
        )
        self.add_parameter(
            "cfg_max_wf_length",
            parameter_class=ManualParameter,
            initial_value=10e-6,
            unit="s",
            vals=vals.Numbers(0, 100e-6),
        )
        self.add_parameter(
            "cfg_awg_channel_amplitude",
            docstring="Output amplitude from 0 to 1.6 V",
            get_cmd=self._get_awg_channel_amplitude,
            set_cmd=self._set_awg_channel_amplitude,
            unit="V",
            vals=vals.Numbers(0, 1.6),
        )
#########################################################################
# Convenience functions below
#########################################################################
def roundup1024(n):
    # Round `n` up to the next multiple of 144.
    # NOTE(review): the name suggests rounding to a multiple of 1024, but
    # the implementation uses 144 (presumably the AWG waveform-length
    # granularity) -- confirm which is intended before changing either.
    return int(np.ceil(n / 144) * 144)
| mit |
tpltnt/SimpleCV | SimpleCV/LineScan.py | 1 | 36021 | from __future__ import print_function
from SimpleCV.base import *
import scipy.signal as sps
import scipy.optimize as spo
import numpy as np
import copy, operator
class LineScan(list):
"""
**SUMMARY**
A line scan is a one dimensional signal pulled from the intensity
of a series of a pixels in an image. LineScan allows you to do a series
of operations just like on an image class object. You can also treat the
line scan as a python list object. A linescan object is automatically
generated by calling ImageClass.getLineScan on an image. You can also
roll your own by declaring a LineScan object and passing the constructor
a 1xN list of values.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> s = img.getLineScan(y=128)
>>>> ss = s.smooth()
>>>> plt.plot(s)
>>>> plt.plot(ss)
>>>> plt.show()
"""
pointLoc = None
image = None
def __init__(self, args, **kwargs):
    """
    Build a LineScan from a 1xN sequence of intensity values.

    Keyword arguments carry the metadata of the scan:

    * *image* - the source image the scan was taken from.
    * *pointLoc* (or legacy *pointLocs*) - list of (x, y) pixel
      positions for each sample.
    * *pt1*, *pt2* - the end points of the scan line.
    * *x*, *y* - the column / row the scan was extracted from.
    * *channel* - the color channel sampled (-1 for intensity).
    """
    if isinstance(args, np.ndarray):
        args = args.tolist()
    list.__init__(self, args)
    self.image = None
    self.pt1 = None
    self.pt2 = None
    self.row = None
    self.col = None
    self.channel = -1
    for key, value in kwargs.items():
        if value is None:
            continue
        # BUGFIX: accept both 'pointLoc' (what every internal caller
        # passes) and the previously-checked 'pointLocs' spelling.
        if key in ('pointLoc', 'pointLocs'):
            self.pointLoc = value
        elif key == 'image':
            # BUGFIX: was stored as self.img, leaving self.image unset.
            self.image = value
        elif key == 'pt1':
            self.pt1 = value
        elif key == 'pt2':
            self.pt2 = value
        elif key == 'x':
            self.col = value
        elif key == 'y':
            self.row = value
        elif key == 'channel':
            self.channel = value
    if self.pointLoc is None:
        # list() so the default is a real list on Python 3 as well,
        # where zip() returns a one-shot iterator.
        self.pointLoc = list(zip(range(0, len(self)), range(0, len(self))))
def __getitem__(self, key):
    """
    **SUMMARY**

    Returns a LineScan when sliced, so that LineScan member functions
    remain usable on sub-ranges (previously a plain list was returned);
    a single value is returned for an integer index.
    """
    if isinstance(key, slice):
        return LineScan(list.__getitem__(self, key))
    return list.__getitem__(self, key)
def __getslice__(self, i, j):
    """
    Deprecated since python 2.0, now using __getitem__
    (kept so old-style slicing on Python 2 still returns a LineScan).
    """
    return self.__getitem__(slice(i, j))
def __sub__(self, other):
    """Element-wise difference with another equal-length signal."""
    if len(self) != len(other):
        print('Size mismatch')
        return None
    result = LineScan(map(operator.sub, self, other))
    result._update(self)
    return result
def __add__(self, other):
    """Element-wise sum with another equal-length signal."""
    if len(self) != len(other):
        print('Size mismatch')
        return None
    result = LineScan(map(operator.add, self, other))
    result._update(self)
    return result
def __mul__(self, other):
    """Element-wise product with another equal-length signal."""
    if len(self) != len(other):
        print('Size mismatch')
        return None
    result = LineScan(map(operator.mul, self, other))
    result._update(self)
    return result
def __div__(self, other):
    """Element-wise division with another equal-length signal.

    NOTE(review): this relies on `operator.div`, which only exists on
    Python 2; on Python 3 `__truediv__` would be needed instead --
    confirm the supported interpreter before relying on division.
    """
    if len(self) == len(other):
        try:
            retVal = LineScan(map(operator.div, self, other))
        except ZeroDivisionError:
            print('Second LineScan contains zeros')
            return None
    else:
        print('Size mismatch')
        return None
    retVal._update(self)
    return retVal
def _update(self, linescan):
    """
    ** SUMMARY**

    Copy the scan metadata (image, end points, row/col, channel and
    pixel locations) from `linescan` onto this instance.
    """
    for attr in ('image', 'pt1', 'pt2', 'row', 'col', 'channel', 'pointLoc'):
        setattr(self, attr, getattr(linescan, attr))
def smooth(self, degree=3):
    """
    **SUMMARY**

    Perform a simple Gaussian smoothing operation on the signal.

    **PARAMETERS**

    * *degree* - The degree of the fitting function. Higher degree means more smoothing.

    **RETURNS**

    A smoothed LineScan object.

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.smooth(7))
    >>>> plt.show()

    **NOTES**

    Cribbed from http://www.swharden.com/blog/2008-11-17-linear-data-smoothing-in-python/
    """
    window = degree*2-1
    weight = np.array([1.0]*window)
    weightGauss = []
    # build a Gaussian weight profile centred on the window
    for i in range(window):
        i = i-degree+1
        frac = i/float(window)
        gauss = 1/(np.exp((4*(frac))**2))
        weightGauss.append(gauss)
    weight = np.array(weightGauss)*weight
    # weighted moving average; shorter than the input by `window` samples
    smoothed = [0.0]*(len(self)-window)
    for i in range(len(smoothed)):
        smoothed[i] = sum(np.array(self[i:i+window])*weight)/sum(weight)
    # recenter the signal so it sits nicely on top of the old:
    # pad with the raw leading/trailing samples to restore the length
    front = self[0:(degree-1)]
    front += smoothed
    front += self[-1*degree:]
    retVal = LineScan(front, image=self.image, pointLoc=self.pointLoc, pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def normalize(self):
    """
    **SUMMARY**

    Normalize the signal so the maximum value is scaled to one.

    **RETURNS**

    A normalized scanline object.

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.normalize())
    >>>> plt.show()
    """
    values = np.array(self, dtype='float32')
    values = values / np.max(values)
    normalized = LineScan(
        list(values[:]),
        image=self.image,
        pointLoc=self.pointLoc,
        pt1=self.pt1,
        pt2=self.pt2,
    )
    normalized._update(self)
    return normalized
def scale(self, value_range=(0, 1)):
    """
    **SUMMARY**

    Scale the signal so that its minimum and maximum map onto the bounds
    of value_range. This is handy if you want to compare the shape of
    two signals that are scaled to different ranges.

    **PARAMETERS**

    * *value_range* - A tuple that provides the lower and upper bounds
      for the output signal.

    **RETURNS**

    A scaled LineScan object.

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.scale(value_range(0,255)))
    >>>> plt.show()

    **SEE ALSO**
    """
    values = np.array(self, dtype='float32')
    old_min = np.min(values)
    old_max = np.max(values)
    new_min = np.min(value_range)
    new_max = np.max(value_range)
    # linear map: [old_min, old_max] -> [new_min, new_max]
    gain = (new_max - new_min) / (old_max - old_min)
    values = gain * (values - old_min) + new_min
    rescaled = LineScan(
        list(values[:]),
        image=self.image,
        pointLoc=self.pointLoc,
        pt1=self.pt1,
        pt2=self.pt2,
    )
    rescaled._update(self)
    return rescaled
def minima(self):
    """
    **SUMMARY**

    The function finds the global minima in the line scan.

    **RETURNS**

    Returns a list of tuples of the format:
    (LineScanIndex,MinimaValue,(image_position_x,image_position_y))

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> minima = sl.smooth().minima()
    >>>> plt.plot(sl)
    >>>> for m in minima:
    >>>>    plt.plot(m[0],m[1],'ro')
    >>>> plt.show()
    """
    # all of these functions should return
    # value, index, pixel coordinate
    # [(index,value,(pix_x,pix_y))...]
    minvalue = np.min(self)
    idxs = np.where(np.array(self) == minvalue)[0]
    minvalue = np.ones((1, len(idxs)))*minvalue  # make zipable
    minvalue = minvalue[0]
    pts = np.array(self.pointLoc)
    pts = pts[idxs]
    pts = [(p[0], p[1]) for p in pts]  # un numpy this
    # list() so the documented list is returned on Python 3 as well,
    # where zip() yields a one-shot iterator
    return list(zip(idxs, minvalue, pts))
def maxima(self):
    """
    **SUMMARY**

    The function finds the global maxima in the line scan.

    **RETURNS**

    Returns a list of tuples of the format:
    (LineScanIndex,MaximaValue,(image_position_x,image_position_y))

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> maxima = sl.smooth().maxima()
    >>>> plt.plot(sl)
    >>>> for m in maxima:
    >>>>    plt.plot(m[0],m[1],'ro')
    >>>> plt.show()
    """
    # all of these functions should return
    # value, index, pixel coordinate
    # [(index,value,(pix_x,pix_y))...]
    maxvalue = np.max(self)
    idxs = np.where(np.array(self) == maxvalue)[0]
    maxvalue = np.ones((1, len(idxs)))*maxvalue  # make zipable
    maxvalue = maxvalue[0]
    pts = np.array(self.pointLoc)
    pts = pts[idxs]
    pts = [(p[0], p[1]) for p in pts]  # un numpy
    # list() so the documented list is returned on Python 3 as well,
    # where zip() yields a one-shot iterator
    return list(zip(idxs, maxvalue, pts))
def derivative(self):
    """
    **SUMMARY**

    This function finds the discrete derivative of the signal: the
    difference between successive samples, with a leading 0 so the
    result keeps the original length. A good use of this function is
    edge detection.

    **RETURNS**

    Returns the discrete derivative function as a LineScan object.

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.derivative())
    >>>> plt.show()
    """
    values = np.array(self, dtype='float32')
    diffs = [0] + list(values[1:] - values[0:-1])
    deriv = LineScan(
        diffs,
        image=self.image,
        pointLoc=self.pointLoc,
        pt1=self.pt1,
        pt2=self.pt2,
    )
    return deriv
def localMaxima(self):
    """
    **SUMMARY**

    The function finds local maxima in the line scan. Local maxima
    are defined as points that are greater than their neighbors to
    the left and to the right.

    **RETURNS**

    Returns a list of tuples of the format:
    (LineScanIndex,MaximaValue,(image_position_x,image_position_y))

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> maxima = sl.smooth().localMaxima()
    >>>> plt.plot(sl)
    >>>> for m in maxima:
    >>>>    plt.plot(m[0],m[1],'ro')
    >>>> plt.show()
    """
    temp = np.array(self)
    # a sample is a local max when it rises from the left AND falls to
    # the right; boundary samples compare only against one neighbor
    is_peak = np.r_[True, temp[1:] > temp[:-1]] & np.r_[temp[:-1] > temp[1:], True]
    idx = np.where(is_peak)[0]
    values = temp[idx]
    pts = np.array(self.pointLoc)
    pts = pts[idx]
    pts = [(p[0], p[1]) for p in pts]  # un numpy
    # list() so the documented list is returned on Python 3 as well
    return list(zip(idx, values, pts))
def localMinima(self):
    """
    **SUMMARY**

    The function finds the local minima in the line scan. Local minima
    are defined as points that are less than their neighbors to
    the left and to the right.

    **RETURNS**

    Returns a list of tuples of the format:
    (LineScanIndex,MinimaValue,(image_position_x,image_position_y))

    **EXAMPLE**

    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> minima = sl.smooth().localMinima()
    >>>> plt.plot(sl)
    >>>> for m in minima:
    >>>>    plt.plot(m[0],m[1],'ro')
    >>>> plt.show()
    """
    temp = np.array(self)
    # a sample is a local min when it drops from the left AND rises to
    # the right; boundary samples compare only against one neighbor
    is_valley = np.r_[True, temp[1:] < temp[:-1]] & np.r_[temp[:-1] < temp[1:], True]
    idx = np.where(is_valley)[0]
    values = temp[idx]
    pts = np.array(self.pointLoc)
    pts = pts[idx]
    pts = [(p[0], p[1]) for p in pts]  # un numpy
    # list() so the documented list is returned on Python 3 as well
    return list(zip(idx, values, pts))
def resample(self, n=100):
    """
    **SUMMARY**
    Resample the signal to fit into n samples. This method is
    handy if you would like to resize multiple signals so that
    they fit together nicely. Note that using n < len(LineScan)
    can cause data loss.
    **PARAMETERS**
    * *n* - The number of samples to resample to.
    **RETURNS**
    A LineScan object of length n.
    **EXAMPLE**
    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> plt.plot(sl)
    >>>> plt.plot(sl.resample(100))
    >>>> plt.show()
    """
    signal = sps.resample(self, n)
    pts = np.array(self.pointLoc)
    # The pixel locations along the scan are linear, so rather than
    # resampling them we regenerate n evenly spaced points between the
    # original endpoints. (np.linspace used explicitly rather than the
    # bare pylab `linspace`.)
    x = np.linspace(pts[0, 0], pts[-1, 0], n)
    y = np.linspace(pts[0, 1], pts[-1, 1], n)
    pts = list(zip(x, y))
    # BUG FIX: the resampled point locations were previously discarded and
    # the original (wrong-length) self.pointLoc was passed through.
    retVal = LineScan(list(signal), image=self.image, pointLoc=pts,
                      pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
# this needs to be moved out to a cookbook or something
#def linear(xdata,m,b):
# return m*xdata+b
# need to add polyfit too
#http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
def fitToModel(self, f, p0=None):
    """
    **SUMMARY**
    Fit the data to the provided model. This can be any arbitrary
    2D signal. Return the data of the model scaled to the data.
    **PARAMETERS**
    * *f* - a function of the form f(x_values, p0,p1, ... pn) where
    p is parameter for the model.
    * *p0* - a list of the initial guess for the model parameters.
    **RETURNS**
    A LineScan object where the fitted model data replaces the
    actual data.
    **EXAMPLE**
    >>>> def aLine(x,m,b):
    >>>>    return m*x+b
    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> fit = sl.fitToModel(aLine)
    >>>> plt.plot(sl)
    >>>> plt.plot(fit)
    >>>> plt.show()
    """
    observed = np.array(self, dtype='float32')
    xs = range(0, len(observed), 1)
    # least-squares fit of the model parameters, then evaluate the model
    params, _cov = spo.curve_fit(f, xs, observed, p0=p0)
    fitted = f(xs, *params)
    out = LineScan(list(fitted), image=self.image, pointLoc=self.pointLoc,
                   pt1=self.pt1, pt2=self.pt2)
    out._update(self)
    return out
def getModelParameters(self, f, p0=None):
    """
    **SUMMARY**
    Fit a model to the data and return the fitted parameters.
    **PARAMETERS**
    * *f* - a function of the form f(x_values, p0,p1, ... pn) where
    p is parameter for the model.
    * *p0* - a list of the initial guess for the model parameters.
    **RETURNS**
    The model parameters as a list. For example if you use a line
    model y=mx+b the function returns the m and b values that fit
    the data.
    **EXAMPLE**
    >>>> def aLine(x,m,b):
    >>>>    return m*x+b
    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> p = sl.getModelParameters(aLine)
    >>>> print p
    """
    observed = np.array(self, dtype='float32')
    xs = range(0, len(observed), 1)
    # only the optimal parameters are of interest; covariance is discarded
    params, _cov = spo.curve_fit(f, xs, observed, p0=p0)
    return params
def convolve(self, kernel):
    """
    **SUMMARY**
    Convolve the line scan with a one dimensional kernel stored as
    a list. This allows you to create an arbitrary filter for the signal.
    **PARAMETERS**
    * *kernel* - An Nx1 list or np.array that defines the kernel.
    **RETURNS**
    A LineScan feature with the kernel applied. 'same' mode crops the
    partial overlaps at either end so the result lines up with the input.
    **EXAMPLE**
    >>>> import matplotlib.pyplot as plt
    >>>> smooth_kernel = [0.1,0.2,0.4,0.2,0.1]
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> out = sl.convolve(smooth_kernel)
    >>>> plt.plot(sl)
    >>>> plt.plot(out)
    >>>> plt.show()
    **SEE ALSO**
    """
    taps = np.array(kernel, dtype='float32')
    filtered = np.convolve(self, taps, 'same')
    return LineScan(filtered, image=self.image, pointLoc=self.pointLoc,
                    pt1=self.pt1, pt2=self.pt2, channel=self.channel)
def fft(self):
    """
    **SUMMARY**
    Perform a Fast Fourier Transform on the line scan and return
    the FFT output and the frequency of each value.
    **RETURNS**
    A tuple (fft, freq): the complex-valued FFT as a numpy array,
    and the matching sample frequencies.
    **EXAMPLE**
    >>>> import matplotlib.pyplot as plt
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(y=128)
    >>>> fft,freq = sl.fft()
    >>>> plt.plot(freq,fft.real,freq,fft.imag)
    >>>> plt.show()
    """
    samples = np.array(self, dtype='float32')
    spectrum = np.fft.fft(samples)
    frequencies = np.fft.fftfreq(len(samples))
    return (spectrum, frequencies)
def ifft(self, fft):
    """
    **SUMMARY**
    Perform an inverse fast Fourier transform on the provided
    complex-valued signal and return the results as a LineScan.
    **PARAMETERS**
    * *fft* - A one dimensional numpy array of complex values
    upon which we will perform the IFFT.
    **RETURNS**
    A LineScan object of the reconstructed signal.
    **EXAMPLE**
    >>>> img = Image('lenna')
    >>>> sl = img.getLineScan(pt1=(0,0),pt2=(300,200))
    >>>> fft,frq = sl.fft()
    >>>> fft[30:] = 0 # low pass filter
    >>>> sl2 = sl.ifft(fft)
    >>>> import matplotlib.pyplot as plt
    >>>> plt.plot(sl)
    >>>> plt.plot(sl2)
    """
    reconstructed = np.fft.ifft(fft)
    # keep only the real component; imaginary residue is numerical noise
    out = LineScan(reconstructed.real)
    out.image = self.image
    out.pointLoc = self.pointLoc
    return out
def createEmptyLUT(self, defaultVal=-1):
    """
    **SUMMARY**
    Create an empty look up table (LUT).
    If default value is what the lut is intially filled with
    if defaultVal == 0
    the array is all zeros.
    if defaultVal > 0
    the array is set to default value. Clipped to 255.
    if defaultVal < 0
    the array is set to the range [0,255]
    if defaultVal is a tuple of two values:
    we set stretch the range of 0 to 255 to match
    the range provided.
    **PARAMETERS**
    * *defaultVal* - See above.
    **RETURNS**
    A LUT (a 256-entry list of ints).
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> lut = ls.createEmptyLUT()
    >>>> ls2 = ls.applyLUT(lut)
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    >>>> plt.show()
    """
    lut = None
    if (isinstance(defaultVal, list) or
            isinstance(defaultVal, tuple)):
        start = np.clip(defaultVal[0], 0, 255)
        stop = np.clip(defaultVal[1], 0, 255)
        lut = np.around(np.linspace(start, stop, 256), 0)
        lut = np.array(lut, dtype='uint8')
        lut = lut.tolist()
    elif (defaultVal == 0):
        lut = np.zeros([1, 256]).tolist()[0]
    elif (defaultVal > 0):
        defaultVal = np.clip(defaultVal, 1, 255)
        lut = np.ones([1, 256]) * defaultVal
        lut = np.array(lut, dtype='uint8')
        lut = lut.tolist()[0]
    elif (defaultVal < 0):
        # BUG FIX: linspace(0,256,256) produced 256 as the final value,
        # which wraps to 0 when cast to uint8 (lut[255] was 0, not 255).
        # Use the inclusive range [0,255] promised by the docstring.
        lut = np.linspace(0, 255, 256)
        lut = np.array(lut, dtype='uint8')
        lut = lut.tolist()
    return lut
def fillLUT(self, lut, idxs, value=255):
    """
    **SUMMARY**
    Fill up an existing LUT (look up table) at the indexes specified
    by idxs with the value specified by value. This is useful for picking
    out specific values.
    **PARAMETERS**
    * *lut* - An existing LUT (just a list of 255 values).
    * *idxs* - The indexes of the LUT to fill with the value.
    This can also be a sample swatch of an image.
    * *value* - the value to set the LUT[idx] to
    **RETURNS**
    An updated LUT (the input list, mutated in place).
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> lut = ls.createEmptyLUT()
    >>>> swatch = img.crop(0,0,10,10)
    >>>> ls.fillLUT(lut,swatch,255)
    >>>> ls2 = ls.applyLUT(lut)
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    >>>> plt.show()
    """
    # isinstance has been flaky here in the past, so duck-type on the
    # class name to detect an Image swatch.
    if idxs.__class__.__name__ == 'Image':
        gray = idxs.getGrayNumpy()
        idxs = gray.reshape([gray.shape[0] * gray.shape[1]]).tolist()
    value = np.clip(value, 0, 255)
    size = len(lut)
    for i in idxs:
        # silently ignore out-of-range indexes
        if i >= 0 and i < size:
            lut[i] = value
    return lut
def threshold(self, threshold=128, invert=False):
    """
    **SUMMARY**
    Do a 1D threshold operation. Values at or above the threshold
    are set to 255, values below the threshold are set to 0.
    If invert is True we do the opposite.
    **PARAMETERS**
    * *threshold* - The cutoff value for our threshold.
    * *invert* - if invert is false values above the threshold
    are set to 255, if invert is True they are set to 0.
    **RETURNS**
    The thresholded linescan operation.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> ls2 = ls.threshold()
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    >>>> plt.show()
    """
    hi, lo = (0, 255) if invert else (255, 0)
    binarized = [lo if pt < threshold else hi for pt in self]
    retVal = LineScan(binarized, image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def invert(self, max=255):
    """
    **SUMMARY**
    Do an 8bit invert of the signal. What was black is now
    white, what was max (usually 255) is now zero.
    **PARAMETERS**
    * *max* - The maximum value of a pixel in the image, usually 255.
    **RETURNS**
    The inverted LineScan object.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> ls2 = ls.invert()
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    >>>> plt.show()
    """
    # BUG FIX: the max parameter was previously ignored and 255 was
    # hard-coded; honor the caller-supplied maximum. The default of 255
    # preserves the old behavior for existing callers.
    out = [max - pt for pt in self]
    retVal = LineScan(out, image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def mean(self):
    """
    **SUMMARY**
    Computes the statistical mean of the signal.
    **RETURNS**
    The mean of the LineScan object as a float.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> avg = ls.mean()
    >>>> plt.plot(ls)
    >>>> plt.axhline(y = avg)
    >>>> plt.show()
    """
    return sum(self) / float(len(self))
def variance(self):
    """
    **SUMMARY**
    Computes the (population) variance of the signal.
    **RETURNS**
    The variance of the LineScan object.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> var = ls.variance()
    >>>> var
    """
    mu = sum(self) / float(len(self))
    squared_devs = [(v - mu) ** 2 for v in self]
    return sum(squared_devs) / len(self)
def std(self):
    """
    **SUMMARY**
    Computes the (population) standard deviation of the signal.
    **RETURNS**
    The standard deviation of the LineScan object.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> avg = ls.mean()
    >>>> std = ls.std()
    >>>> plt.plot(ls)
    >>>> plt.axhline(y = avg)
    >>>> plt.axhline(y = avg - std, color ='r')
    >>>> plt.axhline(y = avg + std, color ='r')
    >>>> plt.show()
    """
    mu = sum(self) / float(len(self))
    squared_devs = [(v - mu) ** 2 for v in self]
    return np.sqrt(sum(squared_devs) / len(self))
def median(self, sz=5):
    """
    **SUMMARY**
    Do a sliding median filter with a window size equal to sz.
    The first and last sz//2 samples are passed through unfiltered.
    **PARAMETERS**
    * *sz* - the size of the median filter (forced up to odd).
    **RETURNS**
    The linescan after being passed through the median filter.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> ls2 = ls.median(7)
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    >>>> plt.show()
    """
    if sz % 2 == 0:
        sz = sz + 1
    skip = int(np.floor(sz / 2))
    out = list(self[0:skip])  # leading samples pass through unfiltered
    vsz = len(self)
    for idx in range(skip, vsz - skip):
        # BUG FIX: the window previously spanned [idx-skip, idx+skip),
        # an even number of samples that excluded the right neighbor;
        # include idx+skip so the window holds exactly sz samples
        # centered on idx.
        out.append(np.median(self[(idx - skip):(idx + skip + 1)]))
    for pt in self[-1 * skip:]:
        out.append(pt)  # trailing samples pass through unfiltered
    retVal = LineScan(out, image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def findFirstIdxEqualTo(self, value=255):
    """
    **SUMMARY**
    Find the index of the first element of the linescan that has
    a value equal to value. If nothing is found None is returned.
    **PARAMETERS**
    * *value* - the value to look for.
    **RETURNS**
    The first index where the value occurs or None if none is found.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> idx = ls.findFirstIdxEqualTo()
    """
    matches = np.where(np.array(self) == value)[0]
    if len(matches) == 0:
        return None
    return matches[0]
def findLastIdxEqualTo(self, value=255):
    """
    **SUMMARY**
    Find the index of the last element of the linescan that has
    a value equal to value. If nothing is found None is returned.
    **PARAMETERS**
    * *value* - the value to look for.
    **RETURNS**
    The last index where the value occurs or None if none is found.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> idx = ls.findLastIdxEqualTo()
    """
    matches = np.where(np.array(self) == value)[0]
    if len(matches) == 0:
        return None
    return matches[-1]
def findFirstIdxGreaterThan(self, value=255):
    """
    **SUMMARY**
    Find the index of the first element of the linescan whose value
    is greater than OR EQUAL TO ``value`` (note: despite the name the
    comparison used is >=). If nothing is found None is returned.
    **PARAMETERS**
    * *value* - the threshold value to look for.
    **RETURNS**
    The first index where a value >= ``value`` occurs, or None if
    none is found.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> idx = ls.findFirstIdxGreaterThan()
    """
    vals = np.where(np.array(self) >= value)[0]
    retVal = None
    if (len(vals) > 0):
        retVal = vals[0]
    return retVal
def applyLUT(self, lut):
    """
    **SUMMARY**
    Apply a look up table to the signal.
    **PARAMETERS**
    * *lut* an array of length 256; each sample of the scan is used as
    an index into the table and replaced by the stored value.
    **RETURNS**
    A LineScan object with the LUT applied to the values.
    **EXAMPLE**
    >>>> ls = img.getLineScan(x=10)
    >>>> lut = ls.createEmptyLUT()
    >>>> ls2 = ls.applyLUT(lut)
    >>>> plt.plot(ls)
    >>>> plt.plot(ls2)
    """
    mapped = [lut[pt] for pt in self]
    retVal = LineScan(mapped, image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2)
    retVal._update(self)
    return retVal
def medianFilter(self, kernel_size=5):
    """
    **SUMMARY**
    Apply median filter on the data
    **PARAMETERS**
    * *kernel_size* - Size of the filter (should be odd int) - int.
    Even values are silently decremented to the next odd size.
    **RETURNS**
    A LineScan object with the median filter applied to the values,
    or None if scipy.signal.medfilt is unavailable.
    **EXAMPLE**
    >>> ls = img.getLineScan(x=10)
    >>> mf = ls.medianFilter()
    >>> plt.plot(ls)
    >>> plt.plot(mf)
    """
    try:
        from scipy.signal import medfilt
    except ImportError:
        warnings.warn("Scipy vesion >= 0.11 requierd.")
        return None
    if kernel_size % 2 == 0:
        kernel_size -= 1
        print("Kernel Size should be odd. New kernel size =" , (kernel_size))
    filtered = medfilt(np.asarray(self[:]), kernel_size)
    retVal = LineScan(filtered.astype("uint8").tolist(),
                      image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2, x=self.col, y=self.row)
    retVal._update(self)
    return retVal
def detrend(self):
    """
    **SUMMARY**
    Remove the linear trend from the data.
    **PARAMETERS**
    **RETURNS**
    A LineScan object with detrended data, or None if
    scipy.signal.detrend is unavailable.
    **EXAMPLE**
    >>> ls = img.getLineScan(x=10)
    >>> dt = ls.detrend()
    >>> plt.plot(ls)
    >>> plt.plot(dt)
    """
    try:
        from scipy.signal import detrend as sdetrend
    except ImportError:
        warnings.warn("Scipy vesion >= 0.11 requierd.")
        return None
    flattened = sdetrend(np.asarray(self[:]))
    retVal = LineScan(flattened.astype("uint8").tolist(),
                      image=self.image, pointLoc=self.pointLoc,
                      pt1=self.pt1, pt2=self.pt2, x=self.col, y=self.row)
    retVal._update(self)
    return retVal
def runningAverage(self, diameter=3, algo="uniform"):
    """
    **SUMMARY**
    Finds the running average by either using a uniform kernel or using a
    gaussian kernel. The gaussian kernel is calculated from the standard
    normal distribution formula.
    **PARAMETERS**
    * *diameter* - Size of the window (should be odd int) - int
    * *algo* - "uniform" (default) / "gaussian" - used to decide the kernel - string
    **RETURNS**
    A LineScan object with the kernel of the provided algorithm applied,
    or None if diameter is even.
    **EXAMPLE**
    >>> ls = img.getLineScan(x=10)
    >>> ra = ls.runningAverage()
    >>> rag = ls.runningAverage(15,algo="gaussian")
    >>> plt.plot(ls)
    >>> plt.plot(ra)
    >>> plt.plot(rag)
    >>> plt.show()
    """
    if diameter % 2 == 0:
        warnings.warn("Diameter must be an odd integer")
        return None
    if algo == "uniform":
        # flat kernel: every tap weighs 1/diameter
        kernel = list(1 / float(diameter) * np.ones(diameter))
    elif algo == "gaussian":
        # normal pdf with sigma = radius/3 so the kernel tails off near zero
        radius = float(diameter) / 2
        sigma = radius / 3
        kernel = [np.exp(-i ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)
                  for i in range(-int(radius), int(radius) + 1)]
    retVal = LineScan([int(v) for v in self.convolve(kernel)])
    retVal._update(self)
    return retVal
def findPeaks(self, window=30, delta=3):
    """
    **SUMMARY**
    Finds the peaks in a LineScan.
    **PARAMETERS**
    * *window* - the size of the window in which the peak
    should have the highest value to be considered as a peak.
    The lower this value the more peaks are returned.
    * *delta* - the minimum difference between the peak and
    all elements in the window
    **RETURNS**
    A list of (peak position, peak value) tuples.
    **EXAMPLE**
    >>> ls = img.getLineScan(x=10)
    >>> peaks = ls.findPeaks()
    >>> print peaks
    >>> peaks10 = ls.findPeaks(window=10)
    >>> print peaks10
    """
    half = int(window / 2.0)
    found = []
    best = -np.Inf
    best_at = None
    for i, sample in enumerate(self):
        # track the running maximum as a peak candidate
        if sample > best:
            best = sample
            best_at = i
        # accept the candidate once every sample in the surrounding
        # window sits at least delta below it
        if max(self[max(0, i - half):i + half]) + delta < best:
            found.append((best_at, best))
            best = -np.Inf
    return found
def findValleys(self, window=30, delta=3):
    """
    **SUMMARY**
    Finds the valleys in a LineScan.
    **PARAMETERS**
    * *window* - the size of the window in which the valley
    should have the lowest value to be considered as a valley.
    The lower this value the more valleys are returned.
    * *delta* - the minimum difference between the valley and
    all elements in the window
    **RETURNS**
    A list of (valley position, valley value) tuples.
    **EXAMPLE**
    >>> ls = img.getLineScan(x=10)
    >>> valleys = ls.findValleys()
    >>> print valleys
    >>> valleys10 = ls.findValleys(window=10)
    >>> print valleys10
    """
    half = int(window / 2.0)
    found = []
    best = np.Inf
    best_at = None
    for i, sample in enumerate(self):
        # track the running minimum as a valley candidate
        if sample < best:
            best = sample
            best_at = i
        # accept the candidate once every sample in the surrounding
        # window sits at least delta above it
        if min(self[max(0, i - half):i + half]) - delta > best:
            found.append((best_at, best))
            best = np.Inf
    return found
def fitSpline(self,degree=2):
"""
**SUMMARY**
A function to generate a spline curve fitting over the points in LineScan with
order of precision given by the parameter degree
**PARAMETERS**
* *degree* - the precision of the generated spline
**RETURNS**
The spline as a LineScan fitting over the initial values of LineScan
**EXAMPLE**
>>> import matplotlib.pyplot as plt
>>> img = Image("lenna")
>>> ls = img.getLineScan(pt1=(10,10)),pt2=(20,20)).normalize()
>>> spline = ls.fitSpline()
>>> plt.plot(ls)
>>> plt.show()
>>> plt.plot(spline)
>>> plt.show()
**NOTES**
Implementation taken from http://www.scipy.org/Cookbook/Interpolation
"""
if degree > 4:
degree = 4 # No significant improvement with respect to time usage
if degree < 1:
warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
return None
retVal = None
y = np.array(self)
x = np.arange(0,len(y),1)
dx = 1
newx = np.arange(0,len(y)-1,pow(0.1,degree))
cj = sps.cspline1d(y)
retVal = sps.cspline1d_eval(cj,newx,dx=dx,x0=x[0])
return retVal
| bsd-3-clause |
rustychris/stompy | stompy/grid/exact_delaunay.py | 1 | 84123 | # A pure-python, exact delaunay triangulation.
# uses robust_predicates for in-circle tests, follows
# the algorithm of CGAL to the extent possible.
import logging
import pdb
logger = logging.getLogger()
import six
import numpy as np
import matplotlib.pyplot as plt
# do these work in py2?
from ..spatial import robust_predicates
from . import unstructured_grid
from ..utils import (circular_pairs, dist, point_segment_distance, set_keywords,
segment_segment_intersection)
if six.PY3:
    # py3 removed the builtin cmp(); restore the py2-style three-way
    # comparison: -1 if a<b, 0 if equal, 1 if a>b.
    def cmp(a,b):
        return bool(a>b)-bool(a<b)
try:
from scipy import spatial
except ImportError:
spatial=None
class DuplicateNode(Exception):
    """Raised when an inserted point coincides with an existing node."""
    pass
class BadConstraint(Exception):
    """Base class for errors raised while adding constrained edges."""
    def __init__(self,*a,**k):
        super(BadConstraint,self).__init__(*a)
        # stash the remaining keyword arguments as attributes on the instance
        set_keywords(self,k)
class IntersectingConstraints(BadConstraint):
    """Raised when a new constraint would cross an existing constrained edge."""
    edge=None  # the offending edge, when known
class DuplicateConstraint(BadConstraint):
    """Raised when the requested constraint already exists."""
    nodes=None  # the node pair of the pre-existing constraint, when known
class ConstraintCollinearNode(IntersectingConstraints):
    """
    Special case of intersections, when a constraint attempts to
    run *through* an existing node
    """
    node=None  # the collinear node the constraint ran into, when known
def ordered(x1, x2, x3):
    """
    Given three collinear points, return True if x2 lies between x1 and
    x3 along their shared line.
    """
    # pick a coordinate axis along which the points actually vary
    axis = 0 if x1[0] != x2[0] else 1
    return (x1[axis] < x2[axis]) == (x2[axis] < x3[axis])
def rel_ordered(x1, x2, x3, x4):
    """
    Given four collinear points, return True if the direction of travel
    x1->x2 matches the direction x3->x4.
    Requires x1!=x2 and x3!=x4.
    """
    # pick a coordinate axis along which the points actually vary
    axis = 0 if x1[0] != x2[0] else 1
    assert x1[axis] != x2[axis]
    assert x3[axis] != x4[axis]
    return (x1[axis] < x2[axis]) == (x3[axis] < x4[axis])
class Triangulation(unstructured_grid.UnstructuredGrid):
    """
    Mimics the Triangulation_2 class of CGAL.
    note that we make some additional assumptions on invariants -
    nodes, cells and edges are ordered in a consistent way:
    """
    # sentinel node index standing in for CGAL's "infinite" vertex
    INF_NODE=-666
    # sentinel cell index for the unbounded face outside the convex hull
    INF_CELL=unstructured_grid.UnstructuredGrid.UNMESHED
    max_sides=3  # triangles only
    # local exception types, re-exported for caller convenience
    DuplicateNode=DuplicateNode
    IntersectingConstraints=IntersectingConstraints
    BadConstraint=BadConstraint
    ConstraintCollinearNode=ConstraintCollinearNode
    post_check=False # enables [expensive] checks after operations
    # edges carry an extra flag marking constrained (non-flippable) edges
    edge_dtype=(unstructured_grid.UnstructuredGrid.edge_dtype +
                [ ('constrained',np.bool8) ] )
def add_node(self, **kwargs):
    """
    Add a node and splice it into the triangulation.
    locate() runs first so the mesh never holds a dangling node if the
    location step fails. (Will eventually need caching/indexing to make
    the locate faster.)
    """
    where = self.locate(kwargs['x'])
    n = super(Triangulation, self).add_node(**kwargs)
    self.tri_insert(n, where)
    return n
def modify_node(self,n,_brute_force=False,**kwargs):
    """
    Modify fields of node n. When 'x' changes, the triangulation is
    repaired, either by a local fix-up (fast path) or by delete/re-add.
    _brute_force: if True, move node by delete/add, rather than trying
    a short cut.
    Raises IntersectingConstraints (after rolling the node back to its
    original position) if the move would invalidate a constraint.
    """
    if 'x' not in kwargs:
        # no geometry change -- plain attribute update
        return super(Triangulation,self).modify_node(n,**kwargs)
    old_rec=self.nodes[n]
    # Brute force, removing and re-adding, is no good as the
    # constraints are lost.
    # A slightly more refined, but still brutish, approach, is to save
    # the constraints, delete, add, add constraints.
    # Common case: the node only moves a small distance, so only a small
    # amount of local work is needed to fix up the triangulation.
    if self.dim()<2:
        # the short cuts are only written for the 2D case.
        _brute_force=True
    if not _brute_force:
        # check whether new node location is on the "right" side
        # of all existing "opposite" edges (the edge of each cell
        # which doesn't contain n).
        shortcut=True
        if shortcut:
            my_cells=self.node_to_cells(n)
            for c in my_cells:
                c_nodes=self.cells['nodes'][c]
                c_xy=self.nodes['x'][c_nodes]
                # cell corners with n's coordinate replaced by the target
                pnts=[]
                for i,c_node in enumerate(c_nodes):
                    if c_node==n:
                        pnts.append(kwargs['x'])
                    else:
                        pnts.append(c_xy[i])
                if robust_predicates.orientation(*pnts) <=0:
                    # cell would become degenerate/inverted -- no shortcut
                    shortcut=False
        if shortcut:
            # also check for this node being on the convex hull:
            # find the pair of half-edges, if they exist, which have
            # n, and have the infinite cell to the left.
            he_rev=he_fwd=None
            for j in self.node_to_edges(n):
                if self.edges['cells'][j,1]==self.INF_CELL:
                    he=self.halfedge(j,1)
                elif self.edges['cells'][j,0]==self.INF_CELL:
                    he=self.halfedge(j,0)
                else:
                    continue
                if he.node_fwd()==n:
                    he_rev=he
                elif he.node_rev()==n:
                    he_fwd=he
                else:
                    assert False
            # can't have just one.
            assert (he_rev is None) == (he_fwd is None)
            if he_rev is not None:
                # need to check that the movement of this node does
                # not invalidate the orientation with respect to
                # neighboring edges of the convex hull.
                # get the five consecutive points, where c is the
                # node being moved. make sure that a-b-c and c-d-e
                # are properly oriented
                cons_idxs=[he_rev.rev().node_rev(),
                           he_rev.node_rev(),
                           n,
                           he_fwd.node_fwd(),
                           he_fwd.fwd().node_fwd()]
                abcde=self.nodes['x'][cons_idxs]
                abcde[2]=kwargs['x']
                if robust_predicates.orientation(*abcde[:3])>0:
                    shortcut=False
                elif robust_predicates.orientation(*abcde[2:])>0:
                    shortcut=False
                elif robust_predicates.orientation(*abcde[1:4])>0:
                    shortcut=False
        if shortcut:
            # short cut should work:
            retval=super(Triangulation,self).modify_node(n,**kwargs)
            self.restore_delaunay(n)
            # when refining the above tests, uncomment this to increase
            # the amount of validation
            # if self.check_convex_hull():
            #     pdb.set_trace()
            return retval
    # Fall back to delete/re-add, preserving constraints.
    # But adding the constraints back can fail, in which case we should
    # roll back our state, and fire an exception.
    constraints_to_replace=[]
    for j in self.node_to_edges(n):
        if self.edges['constrained'][j]:
            constraints_to_replace.append( self.edges['nodes'][j].copy() )
    old_x=self.nodes['x'][n].copy() # in case of rollback
    self.delete_node(n)
    # carry over any fields the caller didn't explicitly set
    for fld in old_rec.dtype.names:
        if fld not in ['x','deleted'] and fld not in kwargs:
            kwargs[fld]=old_rec[fld]
    new_n=self.add_node(_index=n,**kwargs)
    try:
        for n1,n2 in constraints_to_replace:
            self.add_constraint(n1,n2) # This can fail!
    except self.IntersectingConstraints as exc:
        self.log.warning("modify_node: intersecting constraints - rolling back")
        self.delete_node(n)
        kwargs['x']=old_x # move it back to where it started
        new_n=self.add_node(_index=n,**kwargs)
        for n1,n2 in constraints_to_replace:
            self.add_constraint(n1,n2) # This should not fail
        # but signal to the caller that the modify failed
        raise
    assert new_n==n
def add_edge(self, **kw):
    """
    Wrap UnstructuredGrid.add_edge so that unspecified cell neighbors
    default to INF_CELL rather than -1.
    """
    edge = super(Triangulation, self).add_edge(**kw)
    if 'cells' not in kw:
        self.edges[edge]['cells'][:] = self.INF_CELL
    return edge
def choose_start_cell(self, t=None):
    """
    Choose a starting cell for trying to locate where a new vertex
    should go. Returns INF_CELL if there are no valid cells.
    t: can specify a target point which may be used with a spatial index
    to speed up the query (currently unused).
    """
    # scan from the front for the first non-deleted cell
    for c in range(len(self.cells)):
        if not self.cells['deleted'][c]:
            return c
    return self.INF_CELL
# Return codes for locate(): classify where a query point falls
# relative to the current triangulation.
IN_VERTEX=0
IN_EDGE=2
IN_FACE=3
OUTSIDE_CONVEX_HULL=4
OUTSIDE_AFFINE_HULL=5
def dim(self):
    """
    Current dimensionality of the triangulation:
    2 if any live cell exists, 1 for edges only, 0 for isolated
    nodes, -1 when empty.
    """
    if len(self.cells) and not self.cells['deleted'].all():
        return 2
    if len(self.edges) and not self.edges['deleted'].all():
        return 1
    if len(self.nodes) and not self.nodes['deleted'].all():
        return 0
    return -1
def angle_sort_adjacent_nodes(self, n, ref_nbr=None, topo=True):
    """
    Return the neighbors of node n sorted around n.
    topo: if True (default) use the topological ordering; otherwise fall
    back to the geometric angle sort of the base class.
    """
    if topo:
        return self.topo_sort_adjacent_nodes(n, ref_nbr)
    # BUG FIX: this previously called the misspelled
    # 'angle_sort_adjacent_ndoes', raising AttributeError for topo=False.
    return super(Triangulation, self).angle_sort_adjacent_nodes(n, ref_nbr=ref_nbr)
def topo_sort_adjacent_nodes(self,n,ref_nbr=None):
    """ like angle_sort_adjacent_nodes, but relying on topology, not geometry.
    Walks the half-edges around n, chaining each adjacent cell to the next.
    ref_nbr: if given, rotate the result so it starts at ref_nbr.
    """
    nbrs=list(self.node_to_nodes(n))
    if len(nbrs)<3:
        # with fewer than 3 neighbors the order is trivially whatever
        # node_to_nodes returned
        snbrs=nbrs
    else:
        he_nbrs = [ self.nodes_to_halfedge(n,nbr)
                    for nbr in nbrs ]
        # map each cell to (next node, next cell) to chain the walk
        map_next={}
        for he in he_nbrs:
            # this doesn't use angle_sort
            c=he.cell_opp()
            map_next[c] = (he.node_fwd(),he.cell())
        trav0=trav=c
        snbrs=[]
        while 1:
            #if len(snbrs)>20: # DBG
            #    pdb.set_trace()
            node,cell = map_next[trav]
            snbrs.append(node)
            trav=cell
            if trav==trav0:
                # walked all the way around
                break
    if ref_nbr is not None:
        i=list(snbrs).index(ref_nbr)
        snbrs=np.roll(snbrs,-i)
    return snbrs
def locate(self,t,c=None):
    """ t: [x,y] point to locate
    c: starting cell, if known
    return loc=[face,loc_type,loc_index]
    face: INF_CELL if t is not on or inside a finite cell
    loc_type:
    OUTSIDE_AFFINE_HULL: adding this vertex will increase the dimension of the triangulation.
    empty triangulation: dim=-1
    single vertex: dim=0
    collinear edges: dim=1
    faces: dim=2
    loc_index set to current dimensionality
    OUTSIDE_CONVEX_HULL: dimensionality may still be 1 or 2.
    if the dimension is 1, then loc_index gives the nearest node
    if the dimension is 2, then loc_index gives an adjacent half-edge
    IN_VERTEX: t coincides with existing vertex,
    if face is finite, then it's a cell containing the vertex, and loc_index
    is the index of that vertex in the cell.
    if face is INF_CELL, implies dimension<2, and loc_index gives existing node
    IN_EDGE: t is collinear with existing edge.
    if face is finite, it is a cell containing the edge.
    loc_index is the index of the edge itself.
    face may be INF_CELL, which implies dimension<2
    IN_FACE: t is in the interior of a face. face is the containing cell. loc_index
    is not used.
    """
    c=c or self.choose_start_cell(t)
    prev=None # previous face
    # To identify the right orientation of the half-edge, remember
    # the ordering of the nodes -- this is CCW ordering from the
    # perspective of prev
    last_nodes=None
    last_edge=None # the edge between c and prev
    # Checks for affine hull -
    # 3rd element gives the current dimensionality of the affine hull
    if self.Nnodes_valid()==0:
        return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,-1)
    elif self.Nedges_valid()==0:
        return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,0)
    elif self.Ncells_valid()==0:
        return self.locate_1d(t,c)
    # walk from cell to cell, always crossing an edge that has t on
    # its far side, until t is in or on the current cell
    while True:
        if c==self.INF_CELL:
            # // c must contain t in its interior
            # lt = OUTSIDE_CONVEX_HULL;
            # li = c->index(infinite_vertex());
            # Changed to give adjacent edge, rather than
            # confusing loc_index=4
            # loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,last_edge)
            # changed again, to give a half-edge
            # flip the order because they were in the order with respect
            # to the prev face, but now we jumped over last_edge
            he=self.nodes_to_halfedge( last_nodes[1],last_nodes[0] )
            loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,he)
            return loc
        p0=self.nodes['x'][self.cells['nodes'][c,0]]
        p1=self.nodes['x'][self.cells['nodes'][c,1]]
        p2=self.nodes['x'][self.cells['nodes'][c,2]]
        prev = c
        # Orientation o0, o1, o2;
        # nodes are stored in CCW order for the cell.
        # 1st edge connects first two nodes
        # neighboring cells follow the edges
        o0 = robust_predicates.orientation(p0,p1,t)
        if o0 == -1: # CW -- t is on the far side of edge 0
            last_edge=self.cell_to_edges(c)[0]
            last_nodes=self.cells['nodes'][c,[0,1]]
            c=self.cell_to_cells(c)[0]
            continue
        o1 = robust_predicates.orientation(p1,p2,t)
        if o1 == -1:
            last_edge=self.cell_to_edges(c)[1]
            last_nodes=self.cells['nodes'][c,[1,2]]
            c=self.cell_to_cells(c)[1]
            continue
        o2 = robust_predicates.orientation(p2,p0,t)
        if o2 == -1:
            last_edge=self.cell_to_edges(c)[2]
            last_nodes=self.cells['nodes'][c,[2,0]]
            c=self.cell_to_cells(c)[2]
            continue
        # must be in or on a face --
        break
    # For simplicity, I'm skipping some optimizations which avoid re-checking
    # the previous edge. see Triangulation_2.h:2616
    # now t is in c or on its boundary; the number of zero orientations
    # distinguishes interior / edge / vertex
    o_sum=(o0==0)+(o1==0)+(o2==0)
    if o_sum==0:
        loc=(c,self.IN_FACE,4)
    elif o_sum==1:
        if o0==0:
            j=0
        elif o1==0:
            j=1
        else:
            j=2
        # better to consistently return the edge index here, not
        # just its index in the cell
        loc=(c,self.IN_EDGE,self.cells['edges'][c,j])
    elif o_sum==2:
        # t sits on the vertex shared by the two zero-orientation edges
        if o0!=0:
            loc=(c,self.IN_VERTEX,2)
        elif o1!=0:
            loc=(c,self.IN_VERTEX,0)
        else:
            loc=(c,self.IN_VERTEX,1)
    else:
        assert False
    return loc
def locate_1d(self,t,c):
    """
    locate() helper for the 1-D (collinear edges) case.
    There are some edges, and t may fall within an edge, off the end,
    or off to the side.
    """
    j=six.next(self.valid_edge_iter())
    p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
    p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
    o=robust_predicates.orientation(p0,p1,t)
    if o!=0:
        # t is off the line entirely -- inserting it goes to 2-D
        return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,1)
    # t is collinear - need to find out whether it's in an edge
    # or not
    # choose a coordinate which varies along the line
    if p0[0]!=p1[0]:
        coord=0
    else:
        coord=1
    if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
        # t lies between the endpoints of this edge
        return (self.INF_CELL,self.IN_EDGE,j)
    # do we need to go towards increasing or decreasing coord?
    if (t[coord]<p0[coord]) and (t[coord]<p1[coord]):
        direc=-1
    else:
        direc=1
    # walk edge-to-edge along the line in that direction
    while True:
        # j indexes the edge we just tested.
        # p0 and p1 are the endpoints of the edge
        # 1. do we want a neighbor of n0 or n1?
        if direc*cmp(p0[coord],p1[coord]) < 0: # want to go towards p1
            n_adj=self.edges['nodes'][j,1]
        else:
            n_adj=self.edges['nodes'][j,0]
        for jnext in self.node_to_edges(n_adj):
            if jnext!=j:
                j=jnext
                break
        else:
            # walked off the end of the line -
            # n_adj is the nearest to us
            return (self.INF_CELL,self.OUTSIDE_CONVEX_HULL,n_adj)
        p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
        p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
        if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
            return (self.INF_CELL,self.IN_EDGE,j)
def tri_insert(self, n, loc):
    """
    Splice newly added node n into the triangulation, dispatching on the
    location type returned by locate().
    Raises DuplicateNode when n coincides with an existing vertex.
    """
    _face, kind, _idx = loc
    if kind == self.IN_FACE:
        self.tri_insert_in_face(n, loc)
    elif kind == self.IN_EDGE:
        self.tri_insert_in_edge(n, loc)
    elif kind == self.IN_VERTEX:
        raise DuplicateNode()
    elif kind == self.OUTSIDE_CONVEX_HULL:
        self.tri_insert_outside_convex_hull(n, loc)
    elif kind == self.OUTSIDE_AFFINE_HULL:
        self.tri_insert_outside_affine_hull(n, loc)
    # for some of those actions this could be skipped, but it is
    # harmless to always restore
    self.restore_delaunay(n)
def tri_insert_in_face(self, n, loc):
    """Split the containing cell into three triangles meeting at n."""
    face = loc[0]
    na, nb, nc = self.cells['nodes'][face]
    self.delete_cell(face)
    # connect n to each corner of the old cell
    for corner in (na, nb, nc):
        self.add_edge(nodes=[n, corner])
    # and rebuild the three sub-triangles
    self.add_cell(nodes=[n, na, nb])
    self.add_cell(nodes=[n, nb, nc])
    self.add_cell(nodes=[n, nc, na])
def tri_insert_in_edge(self,n,loc):
    """ Takes care of splitting the edge and any adjacent cells
    when new node n falls on an existing edge.
    """
    loc_f,loc_type,loc_edge = loc
    self.log.debug("Loc puts new vertex in edge %s"%loc_edge)
    # remember the adjacent cells before deleting them
    cells_to_split=[]
    for c in self.edge_to_cells(loc_edge):
        if c<0: continue
        cells_to_split.append( self.cells[c].copy() )
        self.log.debug("Deleting cell on insert %d"%c)
        self.delete_cell(c)
    # Modify the edge: a--c becomes a--b plus b--c, with b the new node
    a,c=self.edges['nodes'][loc_edge]
    b=n
    self.delete_edge(loc_edge)
    self.add_edge(nodes=[a,b])
    self.add_edge(nodes=[b,c])
    for cell_data in cells_to_split:
        # the third node of the split cell (note: list-comp n shadows
        # the new-node argument here; the outer b already holds it)
        common=[n for n in cell_data['nodes']
                if n!=a and n!=c][0]
        jnew=self.add_edge(nodes=[b,common])
        # rebuild two cells, substituting b for each original endpoint
        for replace in [a,c]:
            nodes=list(cell_data['nodes'])
            idx=nodes.index(replace)
            nodes[idx]=b
            self.add_cell(nodes=nodes)
def tri_insert_outside_convex_hull(self,n,loc):
dim=self.dim()
if dim==2:
self.tri_insert_outside_convex_hull_2d(n,loc)
elif dim==1:
self.tri_insert_outside_convex_hull_1d(n,loc)
else:
assert False
def tri_insert_outside_convex_hull_1d(self,n,loc):
self.log.debug("tri_insert_outside_convex_hull_1d")
n_adj=loc[2]
self.add_edge(nodes=[n,n_adj])
    def tri_insert_outside_convex_hull_2d(self,n,loc):
        """ Insert node n falling outside the convex hull (2-D case).
        loc[2] is a half-edge of the hull adjacent to n; triangles are
        built from n to that edge and to every neighboring hull edge
        which makes a CCW triangle with n, extending the hull.
        """
        # HERE:
        #   the CGAL code is a little funky because of the use of
        #   infinite vertices and the like.
        #   the plan here:
        #   a. change 'locate' to return halfedges instead of just an
        #      edge.  otherwise we'd have to redo the orientation check here.
        #   b. traverse the half-edge forwards and backwards, accumulating
        #      lists of adjacent edges which also satisfy the CCW rule.
        #   c. create triangles with n and the given half-edge, as well as the
        #      accumulated adjacent edges
        #   the result then is that the convex hull is built out.
        # Triangulation_2.h:1132
        assert loc[0]==self.INF_CELL # sanity.
        he0=loc[2] # adjacent half-edge
        def check_halfedge(he):
            # True when the hull edge he and n form a CCW triangle
            nodes=[he.node_rev(),he.node_fwd(),n]
            pnts=self.nodes['x'][nodes]
            ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
            return ccw>0
        assert check_halfedge(he0)
        # gather additional hull edges visible from n, walking forward...
        addl_fwd=[]
        he=he0.fwd()
        while check_halfedge(he):
            addl_fwd.append(he)
            he=he.fwd()
        # ...and backward from the starting half-edge
        addl_rev=[]
        he=he0.rev()
        while check_halfedge(he):
            addl_rev.append(he)
            he=he.rev()
        self.add_edge( nodes=[he0.node_rev(),n] )
        self.add_edge( nodes=[he0.node_fwd(),n] )
        self.add_cell( nodes=[he0.node_rev(),he0.node_fwd(),n] )
        for he in addl_fwd:
            self.add_edge( nodes=[he.node_fwd(),n] )
            # the second node *had* been ne0.node_fwd(), but that
            # was probably a typo.
            self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
        for he in addl_rev:
            self.add_edge( nodes=[he.node_rev(),n] )
            # same here.
            self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
        # 1. Check orientation.  Since we get an unoriented edge j_adj,
        #    all we can do is assert that the points are not collinear.
        # 2. loops through faces incident to infinite vertex (?)
        #    gathering a list of external edges which make a CCW triangle
        #    with the vertex to insert.  stop on the first edge which fails this.
        #    This is done first traversing CCW, then again traversing CW
        # 3. Make the new face with the given edge..
        #
def tri_insert_outside_affine_hull(self,n,loc):
self.log.debug("Insert outside affine hull")
loc_face,loc_type,curr_dim = loc
if curr_dim==-1:
self.log.debug(" no nodes, no work")
elif curr_dim==0:
self.log.debug(" simply add edge")
for nbr in self.valid_node_iter():
if nbr != n:
self.add_edge(nodes=[n,nbr])
elif curr_dim==1:
self.log.debug(" add edges and cells")
# the strategy in Triangulation_2.h makes some confusing
# use of the infinite face - take a less elegant, more explicit
# approach here
orig_edges=list(self.valid_edge_iter())
for nbr in self.valid_node_iter():
if nbr != n:
self.add_edge(nodes=[n,nbr])
for j in orig_edges:
n1,n2=self.edges['nodes'][j]
self.add_cell( nodes=[n,n1,n2] )
else:
assert False
    def add_cell(self,_force_invariants=True,**kwargs):
        """ Add a triangular cell, maintaining triangulation invariants.

        _force_invariants: when True (default), reorder kwargs['nodes']
        to be CCW and populate kwargs['edges'] from the node pairs.
        Asserts that the three nodes are not collinear.
        Returns the index of the new cell.
        """
        if _force_invariants:
            nodes=kwargs['nodes']
            # Make sure that topological invariants are maintained:
            # nodes are ordered ccw.
            # edges are populated
            # used to assume/force the edges to be sequenced opposite nodes.
            # but that is a triangulation-specific assumption, while we're using
            # a general unstructured_grid base class.  The base class makes
            # an incompatible assumption, that the first edge connects the first
            # two nodes.
            pnts=self.nodes['x'][nodes]
            ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
            assert ccw!=0 # collinear nodes cannot form a cell
            if ccw<0:
                # reverse to make the ordering CCW
                nodes=nodes[::-1]
                kwargs['nodes']=nodes
            # edges between consecutive nodes, per the base class convention
            j0=self.nodes_to_edge(nodes[0],nodes[1])
            j1=self.nodes_to_edge(nodes[1],nodes[2])
            j2=self.nodes_to_edge(nodes[2],nodes[0])
            kwargs['edges']=[j0,j1,j2]
        c=super(Triangulation,self).add_cell(**kwargs)
        # update the link from edges back to cells
        for ji,j in enumerate(self.cells['edges'][c]):
            # used to attempt to enforce this:
            # ji-th edge is the (ji+1)%3,(ji+2)%3 nodes of the cell
            # but that's not compatible with checks in unstructured_grid
            # but need to know if the edge is in that order or the
            # opposite
            if self.edges['nodes'][j,0] == self.cells['nodes'][c,ji]:
                self.edges['cells'][j,0] = c
            else:
                self.edges['cells'][j,1] = c
        return c
    def flip_edge(self,j):
        """
        rotate the given edge CCW. requires that triangular cells
        exist on both sides of the edge
        (that's not a hard and fast requirement, just makes it easier
        to implement. There *does* have to be a potential cell on either
        side).
        Returns (new_left, new_right): the indices of the two rebuilt
        cells.
        """
        c_left,c_right=self.edges['cells'][j,:]
        self.log.debug("Flipping edge %d, with cells %d, %d nodes %d,%d"%(j,c_left,c_right,
                                                                          self.edges['nodes'][j,0],
                                                                          self.edges['nodes'][j,1]) )
        assert c_left>=0 # could be relaxed, at the cost of some complexity here
        assert c_right>=0
        # could work harder to preserve extra info:
        #c_left_data = self.cells[c_left].copy()
        #c_right_data = self.cells[c_right].copy()
        # This is dangerous! - deleting the cells means that topo_sort is no good,
        # and that breaks half-edge ops.
        # moving to happen a bit later -
        # self.delete_cell(c_left)
        # self.delete_cell(c_right)
        he_left=unstructured_grid.HalfEdge(self,j,0)
        he_right=unstructured_grid.HalfEdge(self,j,1)
        # na,nc are the endpoints of j; nd/nb are the nodes opposite j
        # in the left/right cells respectively
        na,nc = self.edges['nodes'][j]
        nd=he_left.fwd().node_fwd()
        nb=he_right.fwd().node_fwd()
        # DBG
        if 0:
            for n,label in zip( [na,nb,nc,nd],
                                "abcd" ):
                plt.text( self.nodes['x'][n,0],
                          self.nodes['x'][n,1],
                          label)
        # keep the time where the cells are deleted to a minimum
        self.delete_cell(c_left)
        self.delete_cell(c_right)
        # the edge now connects the two formerly-opposite nodes
        self.modify_edge(j,nodes=[nb,nd])
        new_left =self.add_cell(nodes=[na,nb,nd])
        new_right=self.add_cell(nodes=[nc,nd,nb])
        return new_left,new_right
def delete_node(self,n):
""" Triangulation version implies cascade, but also
patches up the triangulation
"""
assert n>=0
N=self.Nnodes_valid()
if N==1:
super(Triangulation,self).delete_node(n)
elif N==2:
j=self.node_to_edges(n)[0]
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
elif self.dim()==1:
self.delete_node_1d(n)
else:
self.delete_node_2d(n)
def delete_node_1d(self,n):
# Triangulation_2.h hands this off to the triangulation data structure
# That code looks like:
assert self.dim() == 1
assert self.Nnodes_valid() > 2
# Two cases - either n is at the end of a line of nodes,
# or it's between two nodes.
nbrs=self.node_to_nodes(n)
if len(nbrs)==1: # easy, we're at the end
j=self.nodes_to_edge(n,nbrs[0])
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
else:
assert len(nbrs)==2
j1=self.nodes_to_edge(n,nbrs[0])
j2=self.nodes_to_edge(n,nbrs[1])
self.delete_edge(j1)
self.delete_edge(j2)
super(Triangulation,self).delete_node(n)
self.add_edge( nodes=nbrs )
def test_delete_node_dim_down(self,n):
# see Triangulation_2.h : test_dim_down
# test the dimensionality of the resulting triangulation
# upon removing of vertex v
# it goes down to 1 iff
# 1) any finite face is incident to v
# 2) all vertices are collinear
assert self.dim() == 2
for c in self.valid_cell_iter():
if n not in self.cell_to_nodes(c):
# There is a triangle not involving n
# deleting n would retain a 2D triangulation
return False
pnts=[self.nodes['x'][i]
for i in self.valid_node_iter()
if i!=n]
a,b = pnts[:2]
for c in pnts[2:]:
if robust_predicates.orientation(a,b,c) != 0:
return False
return True
    def delete_node_2d(self,n):
        """ Delete node n from a 2-D triangulation: remove incident
        cells/edges, then re-triangulate the resulting hole via
        fill_hole(), or drop to a 1-D triangulation when the remaining
        nodes are collinear.
        """
        if self.test_delete_node_dim_down(n):
            # deleting n yields a 1D triangulation - no faces
            for c in self.valid_cell_iter():
                self.delete_cell(c)
            # copy
            for j in list(self.node_to_edges(n)):
                self.delete_edge(j)
            super(Triangulation,self).delete_node(n)
            return
        # first, make a hole around n
        deletee=n
        # new way
        nbrs=self.angle_sort_adjacent_nodes(deletee)
        edges_to_delete=[]
        hole_nodes=[]
        for nbrA,nbrB in circular_pairs(nbrs):
            hole_nodes.append(nbrA)
            he=self.nodes_to_halfedge(nbrA,nbrB)
            # 'inf' marks a gap where the boundary of the hole is open
            # (n was on the convex hull there)
            if (he is None) or (he.cell()<0) or (n not in self.cell_to_nodes(he.cell())):
                hole_nodes.append('inf')
            edges_to_delete.append( self.nodes_to_edge( [deletee,nbrA] ) )
        for j in edges_to_delete:
            self.delete_edge_cascade(j)
        super(Triangulation,self).delete_node(deletee)
        # Use the boundary completion approach described in Devillers 2011
        # it's not terribly slow, and can be done with the existing
        # helpers.
        self.fill_hole(hole_nodes)
    def fill_hole(self,hole_nodes):
        """ Re-triangulate the polygonal hole described by hole_nodes,
        using the boundary completion approach (Devillers 2011).
        The sentinel string 'inf' in hole_nodes marks a stretch of the
        boundary that is open (on the convex hull).
        """
        # track potentially multiple holes
        # a few place use list-specific semantics - not ndarray
        hole_nodes=list(hole_nodes)
        holes_nodes=[ hole_nodes ]
        while len(holes_nodes):
            hole_nodes=holes_nodes.pop()
            # rotate so the first two entries are real nodes, giving the
            # candidate edge a--b
            while 'inf' in hole_nodes[:2]:
                hole_nodes = hole_nodes[1:] + hole_nodes[:1]
            a,b=hole_nodes[:2]
            self.log.debug("Considering edge %d-%d"%(a,b) )
            # inf nodes:
            #   can't test any geometry. seems like we can only have boundary
            #   faces if the hole included an inf node.
            #   so drop it from candidates here, but remember that we saw it
            # first, sweep through the candidates to test CCW
            has_inf=False
            c_cand1=hole_nodes[2:]
            c_cand2=[]
            for c in c_cand1:
                if c=='inf':
                    has_inf=True
                elif robust_predicates.orientation( self.nodes['x'][a],
                                                    self.nodes['x'][b],
                                                    self.nodes['x'][c] ) > 0:
                    c_cand2.append(c)
            self.log.debug("After CCW tests, %s are left"%c_cand2)
            # narrow the CCW candidates to the one whose circumcircle with
            # a,b contains no other candidate (the Delaunay choice)
            while len(c_cand2)>1:
                c=c_cand2[0]
                for d in c_cand2[1:]:
                    tst=robust_predicates.incircle( self.nodes['x'][a],
                                                    self.nodes['x'][b],
                                                    self.nodes['x'][c],
                                                    self.nodes['x'][d] )
                    if tst>0:
                        self.log.debug("%d was inside %d-%d-%d"%(d,a,b,c))
                        c_cand2.pop(0)
                        break
                else:
                    # c passed all the tests
                    c_cand2=[c]
                    break
            # if the hole nodes are already all convex, then they already
            # form the new convex hull - n was on the hull and simply goes
            # away
            if has_inf and not c_cand2:
                c_cand2=['inf']
                c='inf' # was this missing??
            else:
                c=c_cand2[0]
            self.log.debug("Decided on %s-%s-%s"%(a,b,c))
            # n.b. add_cell_and_edges is probably what is responsible
            # for the painless dealing with collinear boundaries.
            if c!='inf':
                self.add_cell_and_edges( nodes=[a,b,c] )
            # what hole to put back on the queue?
            if len(hole_nodes)==3:
                # finished this hole.
                self.log.debug("Hole is finished")
                continue
            elif c==hole_nodes[2]:
                self.log.debug("Hole is trimmed from front")
                hole_nodes[:3] = [a,c]
                holes_nodes.append( hole_nodes )
            elif c==hole_nodes[-1]:
                self.log.debug("Hole is trimmed from back")
                hole_nodes=hole_nodes[1:] # drop a
                self.log.debug(" New hole is %s"%hole_nodes)
                holes_nodes.append( hole_nodes )
            else:
                # c splits the hole into two smaller holes
                self.log.debug("Created two new holes")
                idx=hole_nodes.index(c)
                h1=hole_nodes[1:idx+1]
                h2=hole_nodes[idx:] + hole_nodes[:1]
                self.log.debug(" New hole: %s"%h1)
                self.log.debug(" New hole: %s"%h2)
                holes_nodes.append( h1 )
                holes_nodes.append( h2 )
# Make a check for the delaunay criterion:
def check_global_delaunay(self):
bad_checks=[] # [ (cell,node),...]
for c in self.valid_cell_iter():
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.valid_node_iter():
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
# how do we check for constraints here?
# maybe more edge-centric?
# tests of a cell on one side of an edge against a node on the
# other is reflexive.
#
# could go through the edges of c,
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
return bad_checks
def check_local_delaunay(self):
""" Check both sides of each edge - can deal with constrained edges.
"""
bad_checks=[] # [ (cell,node),...]
for j in self.valid_edge_iter():
if self.edges['constrained'][j]:
continue
c1,c2 = self.edge_to_cells(j)
if c1<0 or c2<0:
continue
# always check the smaller index -
# might help with caching later on.
c=min(c1,c2)
c_opp=max(c1,c2)
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.cell_to_nodes(c_opp):
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
raise Exception('fail')
return bad_checks
def check_orientations(self):
"""
Checks all cells for proper CCW orientation,
return a list of cell indexes of failures.
"""
bad_cells=[]
for c in self.valid_cell_iter():
node_xy=self.nodes['x'][self.cells['nodes'][c]]
if robust_predicates.orientation(*node_xy) <= 0:
bad_cells.append(c)
return bad_cells
def check_convex_hull(self):
# find an edge on the convex hull, walk the hull and check
# all consecutive orientations
e2c=self.edge_to_cells()
for j in self.valid_edge_iter():
if e2c[j,0]==self.INF_CELL:
he=self.halfedge(j,0)
break
elif e2c[j,1]==self.INF_CELL:
he=self.halfedge(j,1)
break
else:
assert False
he0=he
bad_hull=[]
while 1:
a=he.node_rev()
b=he.node_fwd()
he=he.fwd()
c=he.node_fwd()
if robust_predicates.orientation(*self.nodes['x'][[a,b,c]])>0:
bad_hull.append( [a,b,c])
if he==he0:
break
return bad_hull
def restore_delaunay(self,n):
""" n: node that was just inserted and may have adjacent cells
which do not meet the Delaunay criterion
"""
# n is node for Vertex_handle v
if self.dim() <= 1:
return
# a vertex is shared by faces, but "stores" only one face.
# Face_handle f=v->face();
# This code iterates over the faces adjacent to v
# in ccw order.
# Face_handle next;
# int i;
# Face_handle start(f);
# do {
# i = f->index(v);
# next = f->neighbor(ccw(i)); // turn ccw around v
# propagating_flip(f,i);
# f=next;
# } while(next != start);
# Shaky on the details, but for starters, try marking the CCW sweep
# based on neighbor nodes.
nbr_nodes=self.angle_sort_adjacent_nodes(n)
N=len(nbr_nodes)
for i in range(N):
trav=nbr_nodes[i]
trav_next=nbr_nodes[(i+1)%N]
c=self.nodes_to_cell( [n,trav,trav_next],fail_hard=False)
if c is not None:
for i in [0,1,2]:
if self.cells['nodes'][c,i]==n:
break
else:
assert False
if c is not None:
self.propagating_flip(c,i)
if self.post_check:
bad=self.check_local_delaunay()
if bad:
raise self.GridException("Delaunay criterion violated")
    def propagating_flip(self,c,i):
        """ Flip edges outward from cell c until the local Delaunay
        criterion is restored, using an explicit stack (taken from
        CGAL's non_recursive_propagating_flip).
        c: cell, akin to face_handle
        i: index of the originating vertex in cell c.
        """
        # track the stack based on the halfedge one place CW
        # from the edge to be flipped.
        edges=[] # std::stack<Edge> edges;
        vp = self.cells['nodes'][c,i] # const Vertex_handle& vp = f->vertex(i);
        p=self.nodes['x'][vp] # const Point& p = vp->point();
        # maybe better to use half-edges here.
        # ordering of edges is slightly different than CGAL.
        # if i gives the vertex,
        # edges.push(Edge(f,i)); # this is the edge *opposite* vp
        # for our ordering, need edge i+1
        edges.append( self.cell_to_halfedge(c,i) )
        while edges: # (! edges.empty()){
            #const Edge& e = edges.top()
            he=edges[-1]
            he_flip=he.fwd()
            # not sure about this part:
            # constrained edges are never flipped
            if self.edges['constrained'][he_flip.j]:
                edges.pop()
                continue
            nbr=he_flip.cell_opp()
            if nbr>=0:
                # assuming that ON_POSITIVE_SIDE would mean that p (the location of the
                # originating vertex) is *inside* the CCW-defined circle of the neighbor
                # and would thus mean that the delaunay criterion is not satisfied.
                #if ON_POSITIVE_SIDE != side_of_oriented_circle(n, p, true):
                nbr_points= self.nodes['x'][ self.cells['nodes'][nbr] ]
                p_in_nbr = robust_predicates.incircle(nbr_points[0],
                                                      nbr_points[1],
                                                      nbr_points[2],
                                                      p )
                #if side_of_oriented_circle(n, p, true) == ON_POSITIVE_SIDE:
                if p_in_nbr > 0:
                    # criterion violated: flip, and push the next edge to
                    # continue propagating
                    self.flip_edge(he_flip.j)
                    extra=he.rev().opposite()
                    edges.append(extra)
                    continue
            edges.pop() # drops last item
            continue
    def find_intersected_elements(self,nA,nB):
        """
        returns a history of the elements traversed between nodes nA
        and nB.
        this includes:
          ('node',<node index>)
          ('edge',<half edge>)
          ('cell',<cell index>)
        note that traversing along an edge is not included - but any
        pair of nodes in sequence implies an edge between them.
        """
        assert nA!=nB
        assert not self.nodes['deleted'][nA]
        assert not self.nodes['deleted'][nB]
        # traversal could encounter multiple types of elements
        trav=('node',nA)
        A=self.nodes['x'][nA]
        B=self.nodes['x'][nB]
        history=[trav]
        if self.dim()==1:
            # 1-D: walk node-to-node along the line toward nB
            assert trav[0]=='node'
            n_nbrs=self.node_to_nodes(trav[1])
            for n_nbr in n_nbrs:
                if n_nbr==nB:
                    history.append( ('node',nB) )
                    return history
                if ordered( A,
                            self.nodes['x'][n_nbr],
                            B ):
                    trav=('node',n_nbr)
                    history.append( trav )
                    he=self.nodes_to_halfedge(nA,n_nbr)
                    break
            else:
                assert False # should never get here
            while trav!=('node',nB):
                he=he.fwd()
                trav=('node',he.node_fwd())
                history.append(trav)
            return history
        else:
            while trav!=('node',nB):
                # DBG! - leftover debugging hook; drops into pdb if the
                # traversal repeats its first element
                if len(history)>1 and history[0]==history[1]:
                    import pdb
                    pdb.set_trace()
                if trav[0]=='node':
                    ntrav=trav[1]
                    # scan cells around ntrav for the one the segment enters
                    for c in self.node_to_cells(ntrav):
                        cn=self.cell_to_nodes(c)
                        # print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
                        ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
                        nD=cn[(ci_trav+1)%3]
                        nE=cn[(ci_trav+2)%3]
                        if nD==nB or nE==nB:
                            trav=('node',nB)
                            # print "Done"
                            break
                        D=self.nodes['x'][nD]
                        oD=robust_predicates.orientation( A,B,D )
                        if oD>0:
                            continue
                        N=self.nodes['x'][ntrav]
                        if oD==0 and ordered(N,D,B):
                            # fell exactly on the A-B segment, and is in the
                            # right direction
                            trav=('node',nD)
                            break
                        E=self.nodes['x'][nE]
                        oE=robust_predicates.orientation( A,B,E )
                        if oE<0:
                            continue
                        if oE==0 and ordered(N,E,B):
                            # direction
                            trav=('node',nE)
                            break
                        j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
                        j_nbrs=self.edge_to_cells(j)
                        # AB crosses an edge - record the edge, and the side we are
                        # approaching from:
                        history.append( ('cell',c) )
                        if j_nbrs[0]==c:
                            trav=('edge',self.halfedge(j,0))
                            # making sure I got the 0/1 correct
                            assert trav[1].cell()==c
                            break
                        elif j_nbrs[1]==c:
                            trav=('edge',self.halfedge(j,1))
                            # ditto
                            assert trav[1].cell()==c
                            break
                        assert False
                elif trav[0]=='edge':
                    he=trav[1].opposite()
                    #jnodes=self.edges['nodes'][j]
                    # have to choose between the opposite two edges or their common
                    # node:
                    c_next=he.cell()
                    history.append( ('cell',c_next) )
                    nD=he.fwd().node_fwd()
                    # print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
                    oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
                    if oD==0:
                        trav=('node',nD)
                    elif oD>0:
                        # going to cross
                        trav=('edge',he.fwd())
                    else:
                        trav=('edge',he.rev())
                else:
                    assert False
                history.append(trav)
            return history
def locate_for_traversal_outside(self,p,p_other,loc_face,loc_type,loc_index):
"""
Helper method for locate_for_traversal()
handle the case where p is outside the triangulation, so loc_type
is either OUTSIDE_AFFINE_HULL or OUTSIDE_CONVEX_HULL
returns
('edge',<half-edge>)
('node',<node>)
(None,None) -- the line between p and p_other doesn't intersect the triangulation
"""
dim=self.dim()
if dim<0:
# there are no nodes, no work to be done
return (None,None)
elif dim==0:
# a single node. either we'll intersect it, or not.
N=six.next(self.valid_node_iter()) # get the only valid node
pN=self.nodes['x'][N]
# p_other could be coincident with N:
if (pN[0]==p_other[0]) and (pN[1]==p_other[1]):
return ('node',N)
# or we have to test for pN falling on the line between p,p_other
oN=robust_predicates.orientation(p, pN, p_other)
# either the segment passes through the one node, or doesn't intersect
# at all:
if oN==0 and ordered(p, pN, p_other):
return ('node',N)
else:
return (None,None)
elif dim==1:
# This could be much smarter, but current use case has this as a rare
# occasion, so just brute force it. find a half-edge, make sure it points
# towards us, and go.
if loc_type==self.OUTSIDE_AFFINE_HULL:
# we know that p is not on the line, but p_other could be.
# get an edge:
j=six.next(self.valid_edge_iter())
he=self.halfedge(j,0)
# get a half-edge facing p:
oj=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
# first - check against p_other - it could be on the same side
# of the line, on the line, or on the other side of the line.
ojo=robust_predicates.orientation(p_other,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
if ojo>0:
# p_other is on the same side of the line as p
return (None,None)
elif ojo==0:
# still have to figure out whether p_other is in the line or
# off the end.
o_loc_face,o_loc_type,o_loc_index=self.locate(p_other)
# just saw that it was in line, so better not be outside affine hull
assert o_loc_type!=self.OUTSIDE_AFFINE_HULL
if o_loc_type==self.OUTSIDE_CONVEX_HULL:
# a point off the line to a point beyond the ends of the line -
# no intersection.
return (None,None)
else:
if o_loc_type==self.IN_VERTEX:
return ('node',o_loc_index)
elif o_loc_type==self.IN_EDGE:
# This had been just returning the index, but we should
# be return half-edge.
# Make sure it faces p:
he=self.halfedge(o_loc_index,0)
oj2=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj2!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj2>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
return ('edge',he)
# shouldn't be possible
assert False
else: # p_other is on the other side
o_rev=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
p_other)
if o_rev==0.0:
return ('node',he.node_rev())
if o_rev > 0:
# rev is to the right of the p--p_other line,
# so walk forward...
A=p ; B=p_other
else:
# flip it around to keep the loop logic the same.
# note that this results in one extra loop, since rev
# becomes fwd and we already know that rev is not
# far enough over. whatever.
A=p_other ; B=p
he=he.opposite()
while 1:
n_fwd=he.node_fwd()
o_fwd=robust_predicates.orientation(A,
self.nodes['x'][n_fwd],
B)
if o_fwd==0.0:
return ('node',n_fwd)
if o_fwd<0:
return ('edge',he) # had been he.j, but we should return half-edge
# must go further!
he_opp=he.opposite()
he=he.fwd()
if he == he_opp: # went round the end - no intersection.
return (None,None)
else: # OUTSIDE_CONVEX_HULL
# points are in a line, and we're on that line but off the end.
# in this case, loc_index gives a nearby node
# so either p_other is also on the line, and the answer
# is ('node',loc_index)
# or it's not on the line, and the answer is (None,None)
orient = robust_predicates.orientation(p,
self.nodes['x'],
p_other)
if orient!=0.0:
return (None,None)
if ordered(p,self.nodes['x'][loc_index],p_other):
return ('node',loc_index)
else:
return (None,None)
elif dim==2:
# use that to get a half-edge facing p...
# had done this, but loc_index is already a half edge
# he_original = he = self.halfedge(loc_index,0)
he_original = he = loc_index
# make sure we got the one facing out
if he.cell()>=0:
he=he.opposite()
assert he.cell()<0
# brute force it
while 1:
# does this edge, or one of it's nodes, fit the bill?
n_rev=he.node_rev()
n_fwd=he.node_fwd()
o_j=robust_predicates.orientation(p,
self.nodes['x'][n_rev],
self.nodes['x'][n_fwd])
if o_j<0:
# this edge is facing away from p - not a candidate.
pass
else:
# note that we could be collinear, o_j==0.0.
o_rev=robust_predicates.orientation(p,self.nodes['x'][n_rev],p_other)
o_fwd=robust_predicates.orientation(p,self.nodes['x'][n_fwd],p_other)
if o_rev == 0.0:
if o_fwd == 0.0:
assert o_j==0.0
if ordered(p,self.nodes['x'][n_rev],self.nodes['x'][n_fwd]):
return ('node',n_rev)
else:
return ('node',n_fwd)
else:
return ('node',n_rev)
elif o_rev>0:
if o_fwd<0:
# found the edge!
return ('edge',he) # had been he.j
elif o_fwd==0:
return ('node',n_fwd)
else:
# the whole edge is on the wrong side of the segment
pass
else: # o_rev<0
pass
he=he.fwd()
if he==he_original:
# none satisfied the intersection
return (None,None)
def locate_for_traversal(self,p,p_other):
""" Given a point [x,y], reformat the result of
self.locate() to be compatible with the traversal
algorithm below. In cases where p is outside the
existing cells/edges/nodes, use the combination of p and p_other
to figure out the first element which would be hit.
"""
# Here - figure out which cell, edge or node corresponds to pB
loc_face,loc_type,loc_index=self.locate(p)
# not ready for ending point far away, outside
if loc_type in [self.OUTSIDE_AFFINE_HULL,self.OUTSIDE_CONVEX_HULL]:
return self.locate_for_traversal_outside(p,p_other,loc_face,loc_type,loc_index)
elif loc_type == self.IN_VERTEX:
if loc_face == self.INF_CELL:
feat=('node', loc_index)
else:
feat=('node', self.cells['nodes'][loc_face, loc_index])
elif loc_type == self.IN_EDGE:
# This should be a half-edge.
# The half-edge is chosen such that it either faces p_other, or
# if all four points are collinear, the ordering is rev -- p -- fwd -- p_other
# or rev -- p -- p_other -- fwd.
he=self.half_edge(loc_index,0) # start with arbitrary orientation
p_rev,p_fwd = self.nodes['x'][ he.nodes() ]
o_p_other = robust_predicates.orientation(p_other, p_rev, p_fwd)
if o_p==0.0:
# should this use rel_ordered instead?
if ordered(p_rev,p,p_other):
# good - we're looking along, from rev to fwd
pass
else:
he=he.opposite()
elif o_p<0:
he=he.opposite()
else:
pass
feat=('edge', he)
elif loc_type == self.IN_FACE:
feat=('cell', loc_face)
else:
assert False # shouldn't happen
return feat
def gen_intersected_elements(self,nA=None,nB=None,pA=None,pB=None):
"""
This is a new take on find_intersected_elements, with changes:
1. Either nodes or arbitrary points can be given
2. Elements are returned as a generator, rather than compiled into a list
and returned all at once.
3. Traversing along an edge was implied in the output of find_intersected_elements,
but is explicitly included here as a node--half_edge--node sequence.
returns a history of the elements traversed.
this includes:
('node',<node index>)
('edge',<half edge>)
('cell',<cell index>)
Notes:
The starting and ending features are included. If points were given
instead of nodes, then the feature here may be a cell, edge or node.
When the point is outside the convex hull or affine hull, then there is not a
corresponding feature (since otherwise one would assume that the feature
is truly intersected). The first feature returned is simply the first feature
encountered along the path, necessarily an edge or node, not a face.
"""
# verify that it was called correctly
if (nA is not None) and (nB is not None):
assert nA!=nB
assert (nA is None) or (not self.nodes['deleted'][nA])
assert (nB is None) or (not self.nodes['deleted'][nB])
assert (nA is None) != (pA is None)
assert (nB is None) != (pB is None)
dim=self.dim()
if nA is not None:
A=self.nodes['x'][nA]
trav=('node',nA)
else:
A=pA # trav set below
if nB is not None:
B=self.nodes['x'][nB]
end=('node',nB)
else:
B=pB # trav set below
if nA is None:
trav=self.locate_for_traversal(A,B)
if trav[0] is None:
return # there are not intersections
if nB is None:
end=self.locate_for_traversal(B,A)
# but the orientation of an edge has to be flipped
if end[0]=='edge':
end=(end[0],end[1].opposite())
# keep tracks of features crossed, including starting/ending
assert trav[0] is not None
history=[trav]
yield trav
if trav==end:
return
if dim==0:
# already yielded the one possible intersection
# but this case should be caught by the return just above
assert False
return
elif dim==1:
# in the case where p -- p_other crosses the 1-dimensional set of
# nodes, trav==end, and we already returned above.
# otherwise, we walk along the edges and nodes
if trav[0]=='node': # get a first half-edge going in the correct direction
n_nbrs=self.node_to_nodes(trav[1])
for n_nbr in n_nbrs:
if (ordered( A,
self.nodes['x'][n_nbr],
B ) or
np.all(B==self.nodes['x'][n_nbr])):
he=self.nodes_to_halfedge(nA,n_nbr)
break
else:
assert False
trav=('edge',he)
history.append(trav)
yield trav
else:
assert trav[0]=='edge'
he=trav[1]
while trav != end:
trav=('node',he.node_fwd())
history.append(trav)
yield trav
if trav==end:
break
he=he.fwd()
trav=('edge',he)
history.append(trav)
yield trav
return
else: # dim==2
while trav!=end:
if trav[0]=='node':
# Crossing through a node
ntrav=trav[1]
N=self.nodes['x'][ntrav]
for c in self.node_to_cells(ntrav):
cn=self.cell_to_nodes(c)
# print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
# the other two nodes of the cell
nD=cn[(ci_trav+1)%3]
nE=cn[(ci_trav+2)%3]
# maybe this can be folded in below
#if end[0]=='node' and (end[1] in [nD,nE]):
# # trav=('node',nB)
# trav=end
# break
# Here
D=self.nodes['x'][nD]
oD=robust_predicates.orientation( A,B,D )
if oD>0:
# D is to the right of E, and our target, A is to the right
# of both, so this cell is not good
continue
if oD==0 and np.dot(B-A,D-N)>0: # ordered(A,N,D):
# used to test for ordered(N,D,B), but B could be on the
# edge, at D, or beyond D. Test with A to know that the
# edge is going in the right direction, then check for where
# B might fall.
# HERE: This is a problem, though, because it's possible for
# A==N.
# What I really want is for A-B to be in the same direction
# as N-D.
# could test a dot product, but that invites some roundoff
# in sinister situations. The differencing is probably not
# a big deal - if we can represent the absolute values
# distinctly, then we can certainly represent their differences.
# the multiplication could lead to numbers which are too small
# to represent. Any of these issues require absurdly small
# values/offsets in the input nodes, and we have already
# established that these all lie on a line and are distinct.
#
# The possible positive orderings
# [A=N] -- D -- B
# A -- N -- D -- B
# [A=N] -- [D==B]
# [A=N] -- B -- D
#
# fell exactly on the A-B segment, and is in the
# right direction
# Announce the edge, which could be the end of the traversal
trav=('edge',self.nodes_to_halfedge(ntrav,nD))
history.append(trav)
yield trav
if trav==end:
return
# And on to the node:
trav=('node',nD)
break # and we've completed this step
E=self.nodes['x'][nE]
oE=robust_predicates.orientation( A,B,E )
if oE<0:
# A is to the left of E
continue
if oE==0 and np.dot(B-A,E-N): # ordered(A,N,E):
# Same as above - establish that it goes in the correct direction.
# again, the dot product is mildly dangerous
# again - fell exactly on the segment A-B, it's in the right
# direction.
trav=('edge',self.nodes_to_halfedge(ntrav,nE))
history.append(trav)
yield trav
if trav==end:
return
trav=('node',nE)
break
# if we get to here, then A--B passes through the cell, and either
# we stop at this cell, or A--B crosses the opposite edge:
trav=('cell',c)
if trav==end:
# don't try to traverse the cell - we're done!
# trav will get appended below
break
else:
# announce the cell, and move on to the edge
history.append(trav)
yield trav
trav=None # avoid confusion, clear this out
# AB crosses an edge - record the edge, and the side we are
# approaching from:
j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
j_nbrs=self.edge_to_cells(j)
if j_nbrs[0]==c:
trav=('edge',self.halfedge(j,0))
elif j_nbrs[1]==c:
trav=('edge',self.halfedge(j,1))
else:
assert False
# making sure I got the 0/1 correct
assert trav[1].cell()==c
break
elif trav[0]=='edge':
# trav[1].cell() is the cell we just left
# this then is the half-edge facing the cell we're
# entering
he=trav[1].opposite()
c_next=he.cell()
trav=('cell',c_next)
if trav==end:
pass # done!
else:
# have to choose between the opposite two edges or their common
# node.
# record the cell we just passed through
history.append(trav)
yield trav
nD=he.fwd().node_fwd()
# print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
if oD==0:
trav=('node',nD)
elif oD>0:
# going to cross the edge "on the right" (I think)
trav=('edge',he.fwd())
else:
# going to cross the edge "on the left"
trav=('edge',he.rev())
else:
assert False
history.append(trav)
yield trav
return
    def add_constraint(self,nA,nB):
        """Force a constrained edge between nodes nA and nB.

        If the edge already exists it is simply flagged constrained
        (raising DuplicateConstraint if it already was).  Otherwise the
        cells and edges crossed by segment nA--nB are deleted, the new
        constrained edge is added, and the two holes on either side of it
        are retriangulated.

        Returns the index of the constrained edge.

        Raises ConstraintCollinearNode if the segment passes exactly
        through a third node, and IntersectingConstraints if it crosses
        an existing constrained edge.
        """
        jAB=self.nodes_to_edge([nA,nB])
        if jAB is not None:
            # no work to do - topology already good.
            if self.edges['constrained'][jAB]:
                raise DuplicateConstraint(nodes=[nA,nB])
            self.edges['constrained'][jAB]=True
            return jAB
        # Walk the sequence of nodes/edges/cells crossed by segment nA--nB.
        int_elts=self.find_intersected_elements(nA,nB)
        # Now we need to record the two holes bordered the new edge:
        left_nodes=[nA] # will be recorded CW
        right_nodes=[nA] # will be recorded CCW
        # Iterate over the crossed elements, checking that the new
        # edge doesn't encounter any collinear nodes or other constrained
        # edges. Build up the nodes of the holes at the same time.
        dead_cells=[]
        dead_edges=[]
        for elt in int_elts[1:-1]:
            if elt[0]=='node':
                # NOTE(review): raised via self here but caught as a bare
                # module-level name elsewhere - presumably the same class.
                raise self.ConstraintCollinearNode("Constraint intersects a node",
                                                   node=elt[1])
            if elt[0]=='cell':
                dead_cells.append(elt[1])
            if elt[0]=='edge':
                if self.edges['constrained'][ elt[1].j ]:
                    raise IntersectingConstraints("Constraint intersects a constraint",
                                                  edge=elt[1].j )
                # accumulate the hole boundaries, skipping repeated nodes
                next_left=elt[1].node_fwd()
                if left_nodes[-1]!=next_left:
                    left_nodes.append(next_left)
                next_right= elt[1].node_rev()
                if right_nodes[-1]!=next_right:
                    right_nodes.append(next_right)
                dead_edges.append(elt[1].j)
        left_nodes.append(nB)
        right_nodes.append(nB)
        # left hole is recorded CW above; reverse so both holes are CCW
        left_nodes = left_nodes[::-1]
        # tricky business here
        # but the delaunay business is only invoked on node operations - leaving
        # the edge/cell operations free and clear to violate invariants
        for c in dead_cells:
            self.delete_cell(c)
        for j in dead_edges:
            self.delete_edge(j)
        j=self.add_edge(nodes=[nA,nB],constrained=True)
        # and then sew up the holes!
        self.fill_hole( left_nodes )
        self.fill_hole( right_nodes )
        return j
def remove_constraint(self,nA=None,nB=None,j=None):
""" Assumes that there exists a constraint between nodes
nA and nB (or that the edge given by j is constrained).
The constrained flag is removed for the edge, and if
the Delaunay criterion is no longer satisfied edges are
flipped as needed.
"""
if j is None:
j=self.nodes_to_edge([nA,nB])
assert self.edges['constrained'][j]
self.edges['constrained'][j]=False
c1,c2=self.edge_to_cells(j)
if (c1>=0) and (c2>=0):
c=c1 # can we just propagate from one side?
for ni,n in enumerate(self.cell_to_nodes(c1)):
if n not in self.edges['nodes'][j]:
self.propagating_flip(c1,ni)
break
if self.post_check:
self.check_local_delaunay()
def node_to_constraints(self,n):
return [j
for j in self.node_to_edges(n)
if self.edges['constrained'][j]]
    def init_from_grid(self,g,node_coordinate='x',set_valid=False,
                       valid_min_area=1e-2,on_intersection='exception'):
        """
        Initialize from the nodes and edges of an existing grid, making
        existing edges constrained
        node_coordinate: supply the name of an alternate coordinate defined
          on the nodes. g.nodes[node_coordinate] should be an [Nnodes,2] field.
        set_valid: if True, add a 'valid' field for cells, and set to True
          for cells of the triangulation that have finite area and fall
          within the src grid g.
        on_intersection:
          'exception': intersecting edges in the input grid raise an error.
          'insert': at intersecting edges construct and insert a new node.
        """
        if set_valid:
            self.add_cell_field('valid',np.zeros(self.Ncells(),np.bool8),
                                on_exists='pass')
        # Seems that the indices will get misaligned if there are
        # deleted nodes.
        # TODO: add node index mapping code here.
        assert np.all( ~g.nodes['deleted'] )
        # Bulk triangulate all source nodes first, then constrain edges.
        self.bulk_init(g.nodes[node_coordinate][~g.nodes['deleted']])
        all_segs=[ g.edges['nodes'][j]
                   for j in g.valid_edge_iter() ]
        while all_segs:
            nodes=all_segs.pop(0)
            if on_intersection=='exception':
                self.add_constraint( *nodes )
            else:
                self.add_constraint_and_intersections( *nodes )
        if set_valid:
            from shapely import geometry
            self.cells['valid']=~self.cells['deleted']
            # Maybe unnecessary. Had some issues with 0 fill values here.
            self.cells['_area']=np.nan
            self.cells['_center']=np.nan
            areas=self.cells_area()
            # Drop degenerate (near-zero area) cells ...
            self.cells['valid'][areas<=valid_min_area]=False
            poly=g.boundary_polygon()
            centroids=self.cells_centroid()
            # ... and cells whose centroid falls outside the source grid.
            for c in np.nonzero(self.cells['valid'])[0]:
                if not poly.contains( geometry.Point(centroids[c]) ):
                    self.cells['valid'][c]=False
    def add_constraint_and_intersections(self,nA,nB,on_exists='exception'):
        """
        Like add_constraint, but in the case of intersections with existing constraints
        insert new nodes as needed and update existing and new constrained edges.

        on_exists: 'exception' | 'ignore' | 'stop' - what to do when a
          sub-segment to be constrained already is.
        Returns ([nodes along the constraint],[constrained edge indices]).
        """
        # Work queue of sub-segments still to be constrained.
        all_segs=[ [nA,nB] ]
        result_nodes=[nA]
        result_edges=[]
        while all_segs:
            nA,nB=all_segs.pop(0)
            try:
                j=self.add_constraint(nA,nB)
            except IntersectingConstraints as exc:
                if isinstance(exc,ConstraintCollinearNode):
                    # Segment passes through a node: split at that node.
                    all_segs.insert(0, [nA,exc.node] )
                    all_segs.insert(1, [exc.node,nB] )
                    continue
                else:
                    # Segment crosses another constraint: split both at the
                    # intersection point and re-queue our two halves.
                    j_other=exc.edge
                    assert j_other is not None
                    segA=self.nodes['x'][self.edges['nodes'][j_other]]
                    segB=self.nodes['x'][[nA,nB]]
                    x_int,alphas=segment_segment_intersection(segA,segB)
                    # Getting an error where x_int is one of the endpoints of
                    # segA. This is while inserting a contour that ends on
                    # the boundary.
                    n_new=self.split_constraint(j=j_other,x=x_int)
                    if nB!=n_new:
                        all_segs.insert(0,[n_new,nB])
                    if nA!=n_new:
                        all_segs.insert(0,[nA,n_new])
                    continue
            except DuplicateConstraint as exc:
                if on_exists=='exception':
                    raise
                elif on_exists=='ignore':
                    # NOTE(review): other methods call nodes_to_edge with a
                    # list of nodes - confirm it also accepts two positionals.
                    j=self.nodes_to_edge(nA,nB)
                elif on_exists=='stop':
                    break
                else:
                    assert False,"Bad value %s for on_exists"%on_exists
            result_nodes.append(nB)
            assert j is not None
            result_edges.append(j)
        return result_nodes,result_edges
def split_constraint(self,x,j):
nodes_other=self.edges['nodes'][j].copy()
j_data=unstructured_grid.rec_to_dict(self.edges[j].copy())
self.remove_constraint(j=j)
n_new=self.add_or_find_node(x=x)
js=[]
if nodes_other[0]!=n_new:
js.append( self.add_constraint(nodes_other[0],n_new) )
if n_new!=nodes_other[1]:
js.append( self.add_constraint(n_new,nodes_other[1]) )
for f in j_data:
if f in ['nodes','cells','deleted']: continue
self.edges[f][js]=j_data[f]
return n_new
def add_constrained_linestring(self,coords,
on_intersection='exception',
on_exists='exception',
closed=False):
"""
Optionally insert new nodes as needed along
the way.
on_intersection: when a constraint intersects an existing constraint,
'exception' => re-raise the exception
'insert' => insert a constructed node, and divide the new and old constraints.
on_exists' => when a constraint to be inserted already exists,
'exception' => re-raise the exception
'ignore' => keep going
'stop' => return
closed: Whether the first and last nodes are also connected
returns [list of nodes],[list of edges]
"""
nodes=[self.add_or_find_node(x=x)
for x in coords]
result_nodes=[nodes[0]]
result_edges=[]
if not closed:
ab_list=zip(nodes[:-1],nodes[1:])
else:
ab_list=zip(nodes,np.roll(nodes,-1))
for a,b in ab_list:
if on_intersection=='insert':
sub_nodes,sub_edges=self.add_constraint_and_intersections(a,b,
on_exists=on_exists)
result_nodes+=sub_nodes[1:]
result_edges+=sub_edges
if (on_exists=='stop') and (sub_nodes[-1]!=b):
print("Stopping early")
break
else:
try:
j=self.add_constraint(a,b)
except DuplicateConstraint as exc:
if on_exists=='exception':
raise
elif on_exists=='stop':
break
elif on_exists=='ignore':
j=self.nodes_to_edge(a,b)
result_nodes.append(b)
result_edges.append(j)
return result_nodes,result_edges
def bulk_init_slow(self,points):
raise Exception("No - it's really slow. Don't do this.")
    def bulk_init(self,points): # ExactDelaunay
        """Bulk-build the triangulation for an array of points via
        scipy.spatial.Delaunay, then translate its cells/neighbors into
        this grid's node/cell/edge structures.  Falls back to
        bulk_init_slow when scipy.spatial is unavailable.
        """
        if spatial is None:
            return self.bulk_init_slow(points)
        # looks like centering this affects how many cells Delaunay
        # finds. That's lame.
        sdt = spatial.Delaunay(points-points.mean(axis=0))
        self.nodes=np.zeros( len(points), self.node_dtype)
        self.cells=np.zeros( sdt.vertices.shape[0], self.cell_dtype)
        self.nodes['x']=points
        self.cells['nodes']=sdt.vertices
        # looks like it's CGAL style:
        # neighbor[1] shares nodes[0] and nodes[2]
        # vertices are CCW
        for c in range(self.Ncells()):
            for i,(a,b) in enumerate(circular_pairs(self.cells['nodes'][c])):
                # first time - that would be i=0, and the first two nodes.
                # but for neighbors, it's indexed by the opposite node. so the edge
                # connected the nodes[0]--nodes[1] corresponds with neighbor 2.
                c_nbr=sdt.neighbors[c,(i+2)%3]
                # c_nbr==-1 on convex hull.
                # only worry about cases where c is larger.
                if c<c_nbr:
                    # the smaller-indexed cell creates the shared edge;
                    # skip here to avoid duplicating it.
                    continue
                if c_nbr<0:
                    c_nbr=self.INF_CELL
                j=self.add_edge(nodes=[a,b],
                                cells=[c,c_nbr])
                # and record in the cell, too
                self.cells['edges'][c,i]=j
                if c_nbr!=self.INF_CELL:
                    # locate the matching (reversed) edge slot in the neighbor
                    nbr_nodes=self.cells['nodes'][c_nbr]
                    for i_nbr in [0,1,2]:
                        if nbr_nodes[i_nbr]==b and nbr_nodes[(i_nbr+1)%3]==a:
                            self.cells['edges'][c_nbr,i_nbr]=j
                            break
                    else:
                        assert False
def constrained_centers(self):
"""
For cells with no constrained edges, return the circumcenter.
If return centroid.
The details may evolve, but the purpose is to get a point which
is inside the domain and can be used like a circumcenter (i.e.
approximately lies on the medial axis of the continous boundary).
"""
ccs=self.cells_center(refresh=True) # circumcenters
centroids=self.cells_centroid()
e2c=self.edge_to_cells() # recalc=True)
cell_with_constraint=np.unique( e2c[ self.edges['constrained']] )
result=ccs.copy()
result[cell_with_constraint] = centroids[cell_with_constraint]
return result
# TODO: def constrained_radii(self):
# Calculate the usual circumradius, but for centers which were
# adjusted due to a constrained edge also check point-segment
# distances.
def point_clearance(self,x,hint=None):
"""
Return the distance from point x=[p_x,p_y] to the nearest
node or constrained segment of the triangulation.
hint: To speed up consecutive queries with spatial locality, pass
a dictionary, and a new dictionary will be returned as the second
item in a tuple. The initial dictionary can be empty, or 'c':int
to give a starting face of the triangulation.
"""
if hint is not None:
loc_face,loc_type,loc_index=self.locate(x,**hint)
else:
loc_face,loc_type,loc_index=self.locate(x)
assert loc_type in (self.IN_VERTEX, self.IN_EDGE, self.IN_FACE)
face_nodes=self.cells['nodes'][loc_face]
min_clearance=dist( self.nodes['x'][face_nodes], x ).min()
for j in self.cell_to_edges(loc_face):
if self.edges['constrained'][j]:
j_clearance=point_segment_distance(x, self.nodes['x'][self.edges['nodes'][j]] )
min_clearance=min(min_clearance,j_clearance)
if hint is not None:
return min_clearance,{'c':loc_face}
else:
return min_clearance
# Issues:
# Calls like edge_to_cells do not scale well right now. In particular,
# it would be better in this code to always specify the edge, so that
# a full scan isn't necessary.
| mit |
kylerbrown/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """An estimator predicting the alpha-quantile of the training targets."""
    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        # weighted vs plain percentile of the targets
        if sample_weight is not None:
            self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
        else:
            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)

    def predict(self, X):
        check_is_fitted(self, 'quantile')
        # constant column: one prediction per row of X
        return np.full((X.shape[0], 1), self.quantile, dtype=np.float64)
class MeanEstimator(BaseEstimator):
    """An estimator predicting the mean of the training targets."""
    def fit(self, X, y, sample_weight=None):
        # np.average covers the weighted case; np.mean the plain one
        if sample_weight is not None:
            self.mean = np.average(y, weights=sample_weight)
        else:
            self.mean = np.mean(y)

    def predict(self, X):
        check_is_fitted(self, 'mean')
        # constant column: one prediction per row of X
        return np.full((X.shape[0], 1), self.mean, dtype=np.float64)
class LogOddsEstimator(BaseEstimator):
    """An estimator predicting the log odds ratio."""
    scale = 1.0  # subclasses may shrink the prior

    def fit(self, X, y, sample_weight=None):
        # pre-cond: pos, neg are encoded as 1, 0
        if sample_weight is None:
            pos = np.sum(y)
            neg = y.shape[0] - pos
        else:
            pos = np.sum(sample_weight * y)
            neg = np.sum(sample_weight * (1 - y))

        if pos == 0 or neg == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(pos / neg)

    def predict(self, X):
        check_is_fitted(self, 'prior')
        # constant column: one prediction per row of X
        return np.full((X.shape[0], 1), self.prior, dtype=np.float64)
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds ratio scaled by 0.5 -- for exponential loss. """
    # Halves the prior relative to LogOddsEstimator; used by ExponentialLoss.
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """An estimator predicting the probability of each
    class in the training data.
    """
    def fit(self, X, y, sample_weight=None):
        # unweighted fit is just the weighted fit with unit weights
        if sample_weight is None:
            sample_weight = np.ones_like(y, dtype=np.float64)
        counts = bincount(y, weights=sample_weight)
        self.priors = counts / counts.sum()

    def predict(self, X):
        check_is_fitted(self, 'priors')
        # broadcast the class priors to every row of X
        out = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
        out[:] = self.priors
        return out
class ZeroEstimator(BaseEstimator):
    """An estimator that simply predicts zero. """
    def fit(self, X, y, sample_weight=None):
        # NOTE(review): integer dtype is taken to mean classification -
        # confirm np.issubdtype(y.dtype, int) covers all int dtypes used here.
        if np.issubdtype(y.dtype, int):
            # classification
            self.n_classes = np.unique(y).shape[0]
            if self.n_classes == 2:
                # binary case collapses to a single output column
                self.n_classes = 1
        else:
            # regression
            self.n_classes = 1
    def predict(self, X):
        check_is_fitted(self, 'n_classes')
        # all-zero predictions, one column per class (or one for regression)
        y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
        y.fill(0.0)
        return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """
    is_multi_class = False
    def __init__(self, n_classes):
        self.K = n_classes
    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()
    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """
    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ----------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,)
            The predictions.
        """
    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default 1.0
            learning rate shrinks the contribution of each tree by
            ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)
        # mask all which are not in sample mask.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1
        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)
        # update predictions (both in-bag and out-of-bag)
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))
    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """
    def __init__(self, n_classes):
        # Regression fits exactly one tree per stage; reject anything else.
        if n_classes != 1:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
        super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions need not to be updated for least squares. """
    def init_estimator(self):
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        # (optionally weighted) mean squared error
        squared = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(squared)
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * squared))

    def negative_gradient(self, y, pred, **kargs):
        # the negative gradient of squared error is just the residual
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.
        But it has to update the predictions.
        """
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf values are already the region means - nothing to do
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression. """
    def init_estimator(self):
        # the median (0.5-quantile) minimizes absolute deviation
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        # (optionally weighted) mean absolute error
        abs_err = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_err.mean()
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * abs_err))

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        return 2.0 * (y - pred.ravel() > 0.0) - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """LAD updates terminal regions to median estimates. """
        region = np.where(terminal_regions == leaf)[0]
        weights = sample_weight.take(region, axis=0)
        diff = y.take(region, axis=0) - pred.take(region, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, weights, percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.
    M-Regression proposed in Friedman 2001.

    Quadratic for residuals within the threshold ``gamma`` (the
    alpha-quantile of the absolute residuals), linear beyond it.
    ``gamma`` is re-estimated in ``negative_gradient`` and cached on
    ``self.gamma`` for reuse by ``__call__`` and the leaf updates.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """
    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        self.alpha = alpha  # quantile of |residual| used for gamma
        self.gamma = None   # filled in by negative_gradient
    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)
    def __call__(self, y, pred, sample_weight=None):
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            # no cached threshold yet - estimate from the current residuals
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            # quadratic inside gamma, linear outside
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss
    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        pred = pred.ravel()
        diff = y - pred
        # re-estimate the clipping threshold each boosting iteration
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        # outside the threshold the gradient is clipped to +-gamma
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        self.gamma = gamma  # cache for __call__ and leaf updates
        return residual
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf value: median plus mean of gamma-clipped deviations from it
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.
    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.

    Parameters
    ----------
    alpha : float
        The quantile to estimate, strictly in (0, 1).
    """
    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # Validate with an explicit exception rather than an assert: asserts
        # are stripped under ``python -O`` and QuantileEstimator already
        # raises ValueError for the same condition.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
        self.percentile = alpha * 100.0
    def init_estimator(self):
        return QuantileEstimator(self.alpha)
    def __call__(self, y, pred, sample_weight=None):
        # pinball loss: alpha-weighted positive residuals,
        # (1-alpha)-weighted negative ones
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() +
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
                    (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss
    def negative_gradient(self, y, pred, **kargs):
        # alpha where under-predicting, -(1-alpha) where over-predicting
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf value is the weighted alpha-quantile of the residuals
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)
        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """
    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Subclasses that do not support probabilities keep this default,
        which raises TypeError.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)
    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.
        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.
    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)
    def init_estimator(self):
        return LogOddsEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        if sample_weight is None:
            return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
        else:
            return (-2.0 / sample_weight.sum() *
                    np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
    def negative_gradient(self, y, pred, **kargs):
        """Compute the residual (= negative gradient). """
        # residual = y - p, where p = sigmoid(pred)
        return y - expit(pred.ravel())
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.
        our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # since prob = y - residual, this is sum(w * prob * (1 - prob))
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
        if denominator == 0.0:
            # degenerate leaf (all probabilities at 0 or 1): keep value zero
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba
    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.
    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """
    is_multi_class = True
    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)
    def init_estimator(self):
        return PriorProbabilityEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """Compute the multinomial deviance.

        Per sample the deviance is ``logsumexp(pred_i) - pred_i[y_i]``.
        """
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k
        if sample_weight is None:
            return np.sum(-1 * (Y * pred).sum(axis=1) +
                          logsumexp(pred, axis=1))
        else:
            # BUG FIX: the weight must multiply the entire per-sample
            # deviance; previously only the (Y * pred) term was weighted
            # while the logsumexp term was summed unweighted.
            return np.sum(-1 * sample_weight * ((Y * pred).sum(axis=1) -
                                                logsumexp(pred, axis=1)))
    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        # residual = indicator(y==k) - softmax probability of class k
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # (K-1)/K factor from the symmetric multi-class formulation
        numerator *= (self.K - 1) / self.K
        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        # softmax over classes, with nan_to_num guarding extreme scores
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.
    Same loss as AdaBoost.

    Labels y in {0, 1} are mapped internally to {-1, 1} via 2*y - 1.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)
    def init_estimator(self):
        return ScaledLogOddsEstimator()
    def __call__(self, y, pred, sample_weight=None):
        # mean of exp(-(2y-1) * pred), optionally weighted
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
    def negative_gradient(self, y, pred, **kargs):
        # y_ = -(2y-1) in {-1, 1}; gradient of exp loss
        y_ = -(2. * y - 1.)
        return y_ * np.exp(y_ * pred.ravel())
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # leaf value is the weighted log-odds style ratio of exp terms
        terminal_region = np.where(terminal_regions == leaf)[0]
        pred = pred.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        y_ = 2. * y - 1.
        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
        denominator = np.sum(sample_weight * np.exp(-y_ * pred))
        if denominator == 0.0:
            # degenerate leaf: keep value zero
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        # factor 2 converts the exponential-loss score to log-odds scale
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba
    def _score_to_decision(self, score):
        return (score.ravel() >= 0.0).astype(np.int)
# Mapping of the ``loss`` parameter values to the loss classes above.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None,  # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }

# Mapping of the string ``init`` parameter values to estimator classes.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Reports verbose output to stdout.

    With ``verbose == 1`` a line is printed whenever the iteration is a
    multiple of ``verbose_mod`` (which grows by powers of ten); with
    ``verbose > 1`` a line is printed for every iteration.
    """
    def __init__(self, verbose):
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Build the header/format strings and print the header line."""
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        # subsampling implies out-of-bag improvement estimates exist
        if est.subsample < 1:
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')

        header_format = '%10s ' + '%16s ' * (len(header_fields) - 1)
        print(header_format % tuple(header_fields))

        self.verbose_fmt = ' '.join(verbose_fmt)
        # report on iterations i with (i+1) % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # iteration index relative to where this fit started (warm starts)
        i = j - self.begin_at_stage
        if (i + 1) % self.verbose_mod != 0:
            return
        oob_impr = est.oob_improvement_[j] if do_oob else 0
        remaining_time = ((est.n_estimators - (j + 1)) *
                          (time() - self.start_time) / float(i + 1))
        if remaining_time > 60:
            remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
        else:
            remaining_time = '{0:.2f}s'.format(remaining_time)
        print(self.verbose_fmt.format(iter=j + 1,
                                      train_score=est.train_score_[j],
                                      oob_impr=oob_impr,
                                      remaining_time=remaining_time))
        if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
            # adjust verbose frequency (powers of 10)
            self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
    @abstractmethod
    def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
                 min_samples_leaf, min_weight_fraction_leaf,
                 max_depth, init, subsample, max_features,
                 random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        # Parameters are stored as-is (scikit-learn convention);
        # validation is deferred to fit time (_check_params).
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.loss = loss
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.subsample = subsample
        self.max_features = max_features
        self.max_depth = max_depth
        self.init = init
        self.random_state = random_state
        self.alpha = alpha
        self.verbose = verbose
        self.max_leaf_nodes = max_leaf_nodes
        self.warm_start = warm_start
        # fitted trees, filled during fit; shape grows to (n_stages, loss.K)
        self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspect, and
snapshoting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.

    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
        The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
        classification, otherwise n_classes.

    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('deviance', 'exponential')

    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):
        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def _validate_y(self, y):
        # Map arbitrary class labels to contiguous integers 0..n_classes-1;
        # ``classes_`` preserves the original labels for predict().
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        # single-output (binary) case is raveled to 1-D for convenience
        if score.shape[1] == 1:
            return score.ravel()
        return score

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        for dec in self._staged_decision_function(X):
            # no yield from in Python2.X
            yield dec

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        decisions = self.loss_._score_to_decision(score)
        # translate integer decisions back to the original class labels
        return self.classes_.take(decisions, axis=0)

    def staged_predict(self, X):
        """Predict class at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self._staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            # not all loss functions define _score_to_proba
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        return np.log(proba)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self._staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            # not all loss functions define _score_to_proba
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.

    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
        The collection of fitted sub-estimators.

    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')

    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        # regression is single-output: flatten [n_samples, 1] to 1-D
        return self._decision_function(X).ravel()

    def staged_predict(self, X):
        """Predict regression target at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self._staged_decision_function(X):
            yield y.ravel()
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/mpl_toolkits/axes_grid1/axes_grid.py | 7 | 31905 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
    # Thin subclass of the local mcolorbar.Colorbar; only carries a
    # deprecated axes-configuration helper.
    def _config_axes_deprecated(self, X, Y):
        '''
        Make an axes patch and outline.

        Draws the colorbar outline as a Line2D, a background Polygon
        patch, and applies the ticks/ticklabels computed by
        ``self._ticker()`` to whichever axis matches ``self.orientation``.
        '''
        ax = self.ax
        ax.set_frame_on(False)
        ax.set_navigate(False)
        # outline polygon of the colorbar body
        xy = self._outline(X, Y)
        ax.update_datalim(xy)
        ax.set_xlim(*ax.dataLim.intervalx)
        ax.set_ylim(*ax.dataLim.intervaly)
        self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
                                     color=mpl.rcParams['axes.edgecolor'],
                                     linewidth=mpl.rcParams['axes.linewidth'])
        ax.add_artist(self.outline)
        self.outline.set_clip_box(None)
        self.outline.set_clip_path(None)
        # background patch drawn behind everything else (zorder=-1)
        c = mpl.rcParams['axes.facecolor']
        self.patch = mpatches.Polygon(xy, edgecolor=c,
                                      facecolor=c,
                                      linewidth=0.01,
                                      zorder=-1)
        ax.add_artist(self.patch)
        ticks, ticklabels, offset_string = self._ticker()
        if self.orientation == 'vertical':
            ax.set_yticks(ticks)
            ax.set_yticklabels(ticklabels)
            ax.yaxis.get_major_formatter().set_offset_string(offset_string)
        else:
            ax.set_xticks(ticks)
            ax.set_xticklabels(ticklabels)
            ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
    """Mix-in with colorbar helpers for the dedicated colorbar axes
    created by :class:`ImageGrid` (see :class:`CbarAxes`).

    Expects the concrete class to define ``self.orientation`` (one of
    "left"/"right"/"top"/"bottom") and ``self._default_label_on``.
    """

    def colorbar(self, mappable, **kwargs):
        """Create a :class:`Colorbar` for *mappable* inside this axes.

        A *locator* keyword may be supplied instead of *ticks*;
        passing both raises ValueError.  Returns the colorbar.
        """
        locator = kwargs.pop("locator", None)
        if locator is None:
            # Default to at most 5 "nice" tick locations.
            if "ticks" not in kwargs:
                kwargs["ticks"] = ticker.MaxNLocator(5)
        if locator is not None:
            if "ticks" in kwargs:
                raise ValueError("Either *locator* or *ticks* need" +
                                 " to be given, not both")
            else:
                kwargs["ticks"] = locator
        # NOTE(review): Axes.hold is deprecated/removed in newer
        # matplotlib -- confirm the target matplotlib version.
        self.hold(True)
        # Map the colorbar-axes position onto a colorbar orientation.
        if self.orientation in ["top", "bottom"]:
            orientation = "horizontal"
        else:
            orientation = "vertical"
        cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
        self._config_axes()

        def on_changed(m):
            # Keep the colorbar in sync when the mappable changes.
            #print 'calling on changed', m.get_cmap().name
            cb.set_cmap(m.get_cmap())
            cb.set_clim(m.get_clim())
            cb.update_bruteforce(m)
        self.cbid = mappable.callbacksSM.connect('changed', on_changed)
        mappable.colorbar = cb
        self.locator = cb.cbar_axis.get_major_locator()
        return cb

    def _config_axes(self):
        '''
        Make an axes patch and outline.
        '''
        ax = self
        ax.set_navigate(False)
        # Hide every axis artist, then re-enable only the side the
        # colorbar sits on (per the stored default label flag).
        ax.axis[:].toggle(all=False)
        b = self._default_label_on
        ax.axis[self.orientation].toggle(all=b)
        # for axis in ax.axis.values():
        # axis.major_ticks.set_visible(False)
        # axis.minor_ticks.set_visible(False)
        # axis.major_ticklabels.set_visible(False)
        # axis.minor_ticklabels.set_visible(False)
        # axis.label.set_visible(False)
        # axis = ax.axis[self.orientation]
        # axis.major_ticks.set_visible(True)
        # axis.minor_ticks.set_visible(True)
        #axis.major_ticklabels.set_size(
        # int(axis.major_ticklabels.get_size()*.9))
        #axis.major_tick_pad = 3
        # axis.major_ticklabels.set_visible(b)
        # axis.minor_ticklabels.set_visible(b)
        # axis.label.set_visible(b)

    def toggle_label(self, b):
        # Show/hide the tick labels and label on the colorbar side,
        # and remember the state for future _config_axes() calls.
        self._default_label_on = b
        axis = self.axis[self.orientation]
        axis.toggle(ticklabels=b, label=b)
        #axis.major_ticklabels.set_visible(b)
        #axis.minor_ticklabels.set_visible(b)
        #axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
    """A locatable axes dedicated to holding a colorbar."""

    def __init__(self, *kl, **kwargs):
        # *orientation* is mandatory: "left"/"right"/"top"/"bottom".
        orientation = kwargs.pop("orientation", None)
        if orientation is None:
            raise ValueError("orientation must be specified")
        self.orientation = orientation
        self._default_label_on = True
        self.locator = None
        # NOTE(review): super(LocatableAxes, ...) skips
        # LocatableAxes.__init__ in the MRO -- looks deliberate but
        # confirm against upstream mpl_toolkits behaviour.
        super(LocatableAxes, self).__init__(*kl, **kwargs)

    def cla(self):
        # Clear the axes, then re-apply the colorbar-axes configuration.
        super(LocatableAxes, self).cla()
        self._config_axes()
class Grid(object):
    """
    A class that creates a grid of Axes. In matplotlib, the axes
    location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that needs to be
    displayed with a given aspect ratio. For example, displaying
    images of a same size with some fixed padding between them cannot
    be easily done in matplotlib. AxesGrid is used in such case.
    """
    # Axes class used when the caller does not supply *axes_class*.
    _defaultLocatableAxesClass = LocatableAxes

    def __init__(self, fig,
                 rect,
                 nrows_ncols,
                 ngrids=None,
                 direction="row",
                 axes_pad=0.02,
                 add_all=True,
                 share_all=False,
                 share_x=True,
                 share_y=True,
                 #aspect=True,
                 label_mode="L",
                 axes_class=None,
                 ):
        """
        Build an :class:`Grid` instance with a grid nrows*ncols
        :class:`~matplotlib.axes.Axes` in
        :class:`~matplotlib.figure.Figure` *fig* with
        *rect=[left, bottom, width, height]* (in
        :class:`~matplotlib.figure.Figure` coordinates) or
        the subplot position code (e.g., "121").

        Optional keyword arguments:

          ================ ======== =========================================
          Keyword          Default  Description
          ================ ======== =========================================
          direction        "row"    [ "row" | "column" ]
          axes_pad         0.02     float| pad between axes given in inches
                                    or tuple-like of floats,
                                    (horizontal padding, vertical padding)
          add_all          True     [ True | False ]
          share_all        False    [ True | False ]
          share_x          True     [ True | False ]
          share_y          True     [ True | False ]
          label_mode       "L"      [ "L" | "1" | "all" ]
          axes_class       None     a type object which must be a subclass
                                    of :class:`~matplotlib.axes.Axes`
          ================ ======== =========================================
        """
        self._nrows, self._ncols = nrows_ncols
        if ngrids is None:
            ngrids = self._nrows * self._ncols
        else:
            # ngrids may be smaller than nrows*ncols, but must be positive.
            if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
                raise Exception("")
        self.ngrids = ngrids
        self._init_axes_pad(axes_pad)
        if direction not in ["column", "row"]:
            raise Exception("")
        self._direction = direction
        if axes_class is None:
            axes_class = self._defaultLocatableAxesClass
            axes_class_args = {}
        else:
            # axes_class may be either a class or a (class, kwargs) tuple.
            if (type(axes_class)) == type and \
                    issubclass(axes_class,
                               self._defaultLocatableAxesClass.Axes):
                axes_class_args = {}
            else:
                axes_class, axes_class_args = axes_class
        self.axes_all = []
        self.axes_column = [[] for _ in range(self._ncols)]
        self.axes_row = [[] for _ in range(self._nrows)]
        h = []
        v = []
        # *rect* may be a subplot code ("111"/111), a SubplotSpec, a
        # (nrows, ncols, num) tuple or an explicit [l, b, w, h] rect.
        if cbook.is_string_like(rect) or cbook.is_numlike(rect):
            self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
                                           aspect=False)
        elif isinstance(rect, SubplotSpec):
            self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
                                           aspect=False)
        elif len(rect) == 3:
            kw = dict(horizontal=h, vertical=v, aspect=False)
            self._divider = SubplotDivider(fig, *rect, **kw)
        elif len(rect) == 4:
            self._divider = Divider(fig, rect, horizontal=h, vertical=v,
                                    aspect=False)
        else:
            raise Exception("")
        rect = self._divider.get_position()
        # reference axes
        self._column_refax = [None for _ in range(self._ncols)]
        self._row_refax = [None for _ in range(self._nrows)]
        self._refax = None
        for i in range(self.ngrids):
            col, row = self._get_col_row(i)
            # Pick the axes this one shares its x/y axis with (if any).
            if share_all:
                sharex = self._refax
                sharey = self._refax
            else:
                if share_x:
                    sharex = self._column_refax[col]
                else:
                    sharex = None
                if share_y:
                    sharey = self._row_refax[row]
                else:
                    sharey = None
            ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
                            **axes_class_args)
            # Remember the first axes created as the sharing reference.
            if share_all:
                if self._refax is None:
                    self._refax = ax
            else:
                if sharex is None:
                    self._column_refax[col] = ax
                if sharey is None:
                    self._row_refax[row] = ax
            self.axes_all.append(ax)
            self.axes_column[col].append(ax)
            self.axes_row[row].append(ax)
        # lower-left-corner axes, used as the labelling reference.
        self.axes_llc = self.axes_column[0][-1]
        self._update_locators()
        if add_all:
            for ax in self.axes_all:
                fig.add_axes(ax)
        self.set_label_mode(label_mode)

    def _init_axes_pad(self, axes_pad):
        # Normalize the pad to (horizontal, vertical) and keep mutable
        # Size objects so set_axes_pad() can update them in place.
        axes_pad = _extend_axes_pad(axes_pad)
        self._axes_pad = axes_pad
        self._horiz_pad_size = Size.Fixed(axes_pad[0])
        self._vert_pad_size = Size.Fixed(axes_pad[1])

    def _update_locators(self):
        """(Re)build the divider's horizontal/vertical size lists and
        assign an axes locator to every axes in the grid."""
        h = []
        h_ax_pos = []
        for _ in self._column_refax:
            #if h: h.append(Size.Fixed(self._axes_pad))
            if h:
                h.append(self._horiz_pad_size)
            h_ax_pos.append(len(h))
            sz = Size.Scaled(1)
            h.append(sz)
        v = []
        v_ax_pos = []
        # Vertical sizes run bottom-to-top, hence the reversed rows.
        for _ in self._row_refax[::-1]:
            #if v: v.append(Size.Fixed(self._axes_pad))
            if v:
                v.append(self._vert_pad_size)
            v_ax_pos.append(len(v))
            sz = Size.Scaled(1)
            v.append(sz)
        for i in range(self.ngrids):
            col, row = self._get_col_row(i)
            locator = self._divider.new_locator(nx=h_ax_pos[col],
                                                ny=v_ax_pos[self._nrows - 1 - row])
            self.axes_all[i].set_axes_locator(locator)
        self._divider.set_horizontal(h)
        self._divider.set_vertical(v)

    def _get_col_row(self, n):
        # Map a flat grid index to (col, row) following self._direction.
        if self._direction == "column":
            col, row = divmod(n, self._nrows)
        else:
            row, col = divmod(n, self._ncols)
        return col, row

    # Good to propagate __len__ if we have __getitem__
    def __len__(self):
        return len(self.axes_all)

    def __getitem__(self, i):
        return self.axes_all[i]

    def get_geometry(self):
        """
        get geometry of the grid. Returns a tuple of two integer,
        representing number of rows and number of columns.
        """
        return self._nrows, self._ncols

    def set_axes_pad(self, axes_pad):
        "set axes_pad"
        # NOTE(review): unlike __init__, this expects a 2-tuple; a scalar
        # would fail on the indexing below -- confirm intended API.
        self._axes_pad = axes_pad
        # These two lines actually differ from ones in _init_axes_pad
        self._horiz_pad_size.fixed_size = axes_pad[0]
        self._vert_pad_size.fixed_size = axes_pad[1]

    def get_axes_pad(self):
        """
        get axes_pad

        Returns
        -------
        tuple
            Padding in inches, (horizontal pad, vertical pad)
        """
        return self._axes_pad

    def set_aspect(self, aspect):
        "set aspect"
        self._divider.set_aspect(aspect)

    def get_aspect(self):
        "get aspect"
        return self._divider.get_aspect()

    def set_label_mode(self, mode):
        "set label_mode"
        # Reminder: _tick_only(ax, b, l) *hides* labels when a flag is True,
        # so mode "all" (labels everywhere) passes False/False.
        if mode == "all":
            for ax in self.axes_all:
                _tick_only(ax, False, False)
        elif mode == "L":
            # left-most axes
            for ax in self.axes_column[0][:-1]:
                _tick_only(ax, bottom_on=True, left_on=False)
            # lower-left axes
            ax = self.axes_column[0][-1]
            _tick_only(ax, bottom_on=False, left_on=False)
            for col in self.axes_column[1:]:
                # axes with no labels
                for ax in col[:-1]:
                    _tick_only(ax, bottom_on=True, left_on=True)
                # bottom
                ax = col[-1]
                _tick_only(ax, bottom_on=False, left_on=True)
        elif mode == "1":
            # Only the lower-left corner axes keeps its labels.
            for ax in self.axes_all:
                _tick_only(ax, bottom_on=True, left_on=True)
            ax = self.axes_llc
            _tick_only(ax, bottom_on=False, left_on=False)

    def get_divider(self):
        return self._divider

    def set_axes_locator(self, locator):
        self._divider.set_locator(locator)

    def get_axes_locator(self):
        return self._divider.get_locator()

    def get_vsize_hsize(self):
        return self._divider.get_vsize_hsize()
        # from axes_size import AddList
        # vsize = AddList(self._divider.get_vertical())
        # hsize = AddList(self._divider.get_horizontal())
        # return vsize, hsize
class ImageGrid(Grid):
    """
    A class that creates a grid of Axes. In matplotlib, the axes
    location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that needs to be
    displayed with a given aspect ratio. For example, displaying
    images of a same size with some fixed padding between them cannot
    be easily done in matplotlib. ImageGrid is used in such case.
    """
    # Axes class used for the per-grid colorbar axes.
    _defaultCbarAxesClass = CbarAxes

    def __init__(self, fig,
                 rect,
                 nrows_ncols,
                 ngrids=None,
                 direction="row",
                 axes_pad=0.02,
                 add_all=True,
                 share_all=False,
                 aspect=True,
                 label_mode="L",
                 cbar_mode=None,
                 cbar_location="right",
                 cbar_pad=None,
                 cbar_size="5%",
                 cbar_set_cax=True,
                 axes_class=None,
                 ):
        """
        Build an :class:`ImageGrid` instance with a grid nrows*ncols
        :class:`~matplotlib.axes.Axes` in
        :class:`~matplotlib.figure.Figure` *fig* with
        *rect=[left, bottom, width, height]* (in
        :class:`~matplotlib.figure.Figure` coordinates) or
        the subplot position code (e.g., "121").

        Optional keyword arguments:

          ================ ======== =========================================
          Keyword          Default  Description
          ================ ======== =========================================
          direction        "row"    [ "row" | "column" ]
          axes_pad         0.02     float| pad between axes given in inches
                                    or tuple-like of floats,
                                    (horizontal padding, vertical padding)
          add_all          True     [ True | False ]
          share_all        False    [ True | False ]
          aspect           True     [ True | False ]
          label_mode       "L"      [ "L" | "1" | "all" ]
          cbar_mode        None     [ "each" | "single" | "edge" ]
          cbar_location    "right"  [ "left" | "right" | "bottom" | "top" ]
          cbar_pad         None
          cbar_size        "5%"
          cbar_set_cax     True     [ True | False ]
          axes_class       None     a type object which must be a subclass
                                    of axes_grid's subclass of
                                    :class:`~matplotlib.axes.Axes`
          ================ ======== =========================================

        *cbar_set_cax* : if True, each axes in the grid has a cax
          attribute that is bind to associated cbar_axes.
        """
        self._nrows, self._ncols = nrows_ncols
        if ngrids is None:
            ngrids = self._nrows * self._ncols
        else:
            # ngrids may be smaller than nrows*ncols, but must be positive.
            if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
                raise Exception("")
        self.ngrids = ngrids
        axes_pad = _extend_axes_pad(axes_pad)
        self._axes_pad = axes_pad
        self._colorbar_mode = cbar_mode
        self._colorbar_location = cbar_location
        if cbar_pad is None:
            # horizontal or vertical arrangement?
            if cbar_location in ("left", "right"):
                self._colorbar_pad = axes_pad[0]
            else:
                self._colorbar_pad = axes_pad[1]
        else:
            self._colorbar_pad = cbar_pad
        self._colorbar_size = cbar_size
        self._init_axes_pad(axes_pad)
        if direction not in ["column", "row"]:
            raise Exception("")
        self._direction = direction
        if axes_class is None:
            axes_class = self._defaultLocatableAxesClass
            axes_class_args = {}
        else:
            # BUGFIX: *axes_class* is a class, never an instance, so the
            # previous ``isinstance(axes_class, maxes.Axes)`` test was
            # always False and passing a bare class crashed on the tuple
            # unpacking below.  Use the same class/subclass test as
            # Grid.__init__ for consistency.
            if (type(axes_class)) == type and \
                    issubclass(axes_class,
                               self._defaultLocatableAxesClass.Axes):
                axes_class_args = {}
            else:
                # Otherwise expect a (class, kwargs) tuple.
                axes_class, axes_class_args = axes_class
        self.axes_all = []
        self.axes_column = [[] for _ in range(self._ncols)]
        self.axes_row = [[] for _ in range(self._nrows)]
        self.cbar_axes = []
        h = []
        v = []
        # *rect* may be a subplot code ("111"/111), a SubplotSpec, a
        # (nrows, ncols, num) tuple or an explicit [l, b, w, h] rect.
        if cbook.is_string_like(rect) or cbook.is_numlike(rect):
            self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
                                           aspect=aspect)
        elif isinstance(rect, SubplotSpec):
            self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
                                           aspect=aspect)
        elif len(rect) == 3:
            kw = dict(horizontal=h, vertical=v, aspect=aspect)
            self._divider = SubplotDivider(fig, *rect, **kw)
        elif len(rect) == 4:
            self._divider = Divider(fig, rect, horizontal=h, vertical=v,
                                    aspect=aspect)
        else:
            raise Exception("")
        rect = self._divider.get_position()
        # reference axes
        self._column_refax = [None for _ in range(self._ncols)]
        self._row_refax = [None for _ in range(self._nrows)]
        self._refax = None
        for i in range(self.ngrids):
            col, row = self._get_col_row(i)
            # Pick the axes this one shares its x/y axis with (if any).
            if share_all:
                if self.axes_all:
                    sharex = self.axes_all[0]
                    sharey = self.axes_all[0]
                else:
                    sharex = None
                    sharey = None
            else:
                sharex = self._column_refax[col]
                sharey = self._row_refax[row]
            ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
                            **axes_class_args)
            self.axes_all.append(ax)
            self.axes_column[col].append(ax)
            self.axes_row[row].append(ax)
            # Remember the first axes created as the sharing reference.
            if share_all:
                if self._refax is None:
                    self._refax = ax
            if sharex is None:
                self._column_refax[col] = ax
            if sharey is None:
                self._row_refax[row] = ax
            # One colorbar axes per grid cell (visibility is decided later
            # in _update_locators according to cbar_mode).
            cax = self._defaultCbarAxesClass(fig, rect,
                                             orientation=self._colorbar_location)
            self.cbar_axes.append(cax)
        # lower-left-corner axes, used as the labelling reference.
        self.axes_llc = self.axes_column[0][-1]
        self._update_locators()
        if add_all:
            for ax in self.axes_all+self.cbar_axes:
                fig.add_axes(ax)
        if cbar_set_cax:
            if self._colorbar_mode == "single":
                for ax in self.axes_all:
                    ax.cax = self.cbar_axes[0]
            else:
                for ax, cax in zip(self.axes_all, self.cbar_axes):
                    ax.cax = cax
        self.set_label_mode(label_mode)

    def _update_locators(self):
        """(Re)build the divider's horizontal/vertical size lists —
        interleaving colorbar sizes/pads per *cbar_mode* — and assign a
        locator to every image axes and colorbar axes."""
        h = []
        v = []
        h_ax_pos = []
        h_cb_pos = []
        # A single colorbar on the left/bottom occupies the first slots.
        if (self._colorbar_mode == "single" and
                self._colorbar_location in ('left', 'bottom')):
            if self._colorbar_location == "left":
                #sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
                sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
                h.append(Size.from_any(self._colorbar_size, sz))
                h.append(Size.from_any(self._colorbar_pad, sz))
                locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
            elif self._colorbar_location == "bottom":
                #sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
                sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
                v.append(Size.from_any(self._colorbar_size, sz))
                v.append(Size.from_any(self._colorbar_pad, sz))
                locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
            for i in range(self.ngrids):
                self.cbar_axes[i].set_visible(False)
            self.cbar_axes[0].set_axes_locator(locator)
            self.cbar_axes[0].set_visible(True)
        # Horizontal sizes, column by column (left to right).
        for col, ax in enumerate(self.axes_row[0]):
            if h:
                h.append(self._horiz_pad_size)  # Size.Fixed(self._axes_pad))
            if ax:
                sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
            else:
                sz = Size.AxesX(self.axes_all[0],
                                aspect="axes", ref_ax=self.axes_all[0])
            if (self._colorbar_mode == "each" or
                    (self._colorbar_mode == 'edge' and
                     col == 0)) and self._colorbar_location == "left":
                h_cb_pos.append(len(h))
                h.append(Size.from_any(self._colorbar_size, sz))
                h.append(Size.from_any(self._colorbar_pad, sz))
            h_ax_pos.append(len(h))
            h.append(sz)
            if ((self._colorbar_mode == "each" or
                    (self._colorbar_mode == 'edge' and
                     col == self._ncols - 1)) and
                    self._colorbar_location == "right"):
                h.append(Size.from_any(self._colorbar_pad, sz))
                h_cb_pos.append(len(h))
                h.append(Size.from_any(self._colorbar_size, sz))
        v_ax_pos = []
        v_cb_pos = []
        # Vertical sizes, bottom to top (hence the reversed column).
        for row, ax in enumerate(self.axes_column[0][::-1]):
            if v:
                v.append(self._vert_pad_size)  # Size.Fixed(self._axes_pad))
            if ax:
                sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
            else:
                sz = Size.AxesY(self.axes_all[0],
                                aspect="axes", ref_ax=self.axes_all[0])
            if (self._colorbar_mode == "each" or
                    (self._colorbar_mode == 'edge' and
                     row == 0)) and self._colorbar_location == "bottom":
                v_cb_pos.append(len(v))
                v.append(Size.from_any(self._colorbar_size, sz))
                v.append(Size.from_any(self._colorbar_pad, sz))
            v_ax_pos.append(len(v))
            v.append(sz)
            if ((self._colorbar_mode == "each" or
                    (self._colorbar_mode == 'edge' and
                     row == self._nrows - 1)) and
                    self._colorbar_location == "top"):
                v.append(Size.from_any(self._colorbar_pad, sz))
                v_cb_pos.append(len(v))
                v.append(Size.from_any(self._colorbar_size, sz))
        # Attach locators to the image axes (and per-cell colorbars).
        for i in range(self.ngrids):
            col, row = self._get_col_row(i)
            #locator = self._divider.new_locator(nx=4*col,
            #                                    ny=2*(self._nrows - row - 1))
            locator = self._divider.new_locator(nx=h_ax_pos[col],
                                                ny=v_ax_pos[self._nrows-1-row])
            self.axes_all[i].set_axes_locator(locator)
            if self._colorbar_mode == "each":
                if self._colorbar_location in ("right", "left"):
                    locator = self._divider.new_locator(
                        nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
                elif self._colorbar_location in ("top", "bottom"):
                    locator = self._divider.new_locator(
                        nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
                self.cbar_axes[i].set_axes_locator(locator)
            elif self._colorbar_mode == 'edge':
                # Edge mode: one colorbar per row (left/right) or per
                # column (top/bottom), indexed accordingly.
                if ((self._colorbar_location == 'left' and col == 0) or
                        (self._colorbar_location == 'right'
                         and col == self._ncols - 1)):
                    locator = self._divider.new_locator(
                        nx=h_cb_pos[0], ny=v_ax_pos[self._nrows - 1 - row])
                    self.cbar_axes[row].set_axes_locator(locator)
                elif ((self._colorbar_location == 'bottom' and
                       row == self._nrows - 1) or
                      (self._colorbar_location == 'top' and row == 0)):
                    locator = self._divider.new_locator(nx=h_ax_pos[col],
                                                        ny=v_cb_pos[0])
                    self.cbar_axes[col].set_axes_locator(locator)
        if self._colorbar_mode == "single":
            # A single colorbar on the right/top occupies the last slots.
            if self._colorbar_location == "right":
                #sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
                sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
                h.append(Size.from_any(self._colorbar_pad, sz))
                h.append(Size.from_any(self._colorbar_size, sz))
                locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
            elif self._colorbar_location == "top":
                #sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
                sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
                v.append(Size.from_any(self._colorbar_pad, sz))
                v.append(Size.from_any(self._colorbar_size, sz))
                locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
            if self._colorbar_location in ("right", "top"):
                for i in range(self.ngrids):
                    self.cbar_axes[i].set_visible(False)
                self.cbar_axes[0].set_axes_locator(locator)
                self.cbar_axes[0].set_visible(True)
        elif self._colorbar_mode == "each":
            for i in range(self.ngrids):
                self.cbar_axes[i].set_visible(True)
        elif self._colorbar_mode == "edge":
            if self._colorbar_location in ('right', 'left'):
                count = self._nrows
            else:
                count = self._ncols
            for i in range(count):
                self.cbar_axes[i].set_visible(True)
            for j in range(i + 1, self.ngrids):
                self.cbar_axes[j].set_visible(False)
        else:
            # No colorbars: hide them and park them out of the way.
            for i in range(self.ngrids):
                self.cbar_axes[i].set_visible(False)
                self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
                                               which="active")
        self._divider.set_horizontal(h)
        self._divider.set_vertical(v)
# Historical alias kept for backwards compatibility.
AxesGrid = ImageGrid
# Dead demo code below (disabled with ``if 0:``), kept as usage examples.
#if __name__ == "__main__":
if 0:
    # Demo: a plain 2x2 Grid with label mode "1".
    F = plt.figure(1, (7, 6))
    F.clf()
    F.subplots_adjust(left=0.15, right=0.9)
    grid = Grid(F, 111,  # similar to subplot(111)
                nrows_ncols=(2, 2),
                direction="row",
                axes_pad = 0.05,
                add_all=True,
                label_mode = "1",
                )
#if __name__ == "__main__":
if 0:
    from .axes_divider import get_demo_image
    # Demo: three ImageGrids with different colorbar modes.
    F = plt.figure(1, (9, 3.5))
    F.clf()
    F.subplots_adjust(left=0.05, right=0.98)
    grid = ImageGrid(F, 131,  # similar to subplot(111)
                     nrows_ncols=(2, 2),
                     direction="row",
                     axes_pad = 0.05,
                     add_all=True,
                     label_mode = "1",
                     )
    Z, extent = get_demo_image()
    plt.ioff()
    for i in range(4):
        im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
    # This only affects axes in
    # first column and second row as share_all = False.
    grid.axes_llc.set_xticks([-2, 0, 2])
    grid.axes_llc.set_yticks([-2, 0, 2])
    plt.ion()
    grid = ImageGrid(F, 132,  # similar to subplot(111)
                     nrows_ncols=(2, 2),
                     direction="row",
                     axes_pad = 0.0,
                     add_all=True,
                     share_all=True,
                     label_mode = "1",
                     cbar_mode="single",
                     )
    Z, extent = get_demo_image()
    plt.ioff()
    for i in range(4):
        im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
    plt.colorbar(im, cax=grid.cbar_axes[0])
    plt.setp(grid.cbar_axes[0].get_yticklabels(), visible=False)
    # This affects all axes as share_all = True.
    grid.axes_llc.set_xticks([-2, 0, 2])
    grid.axes_llc.set_yticks([-2, 0, 2])
    plt.ion()
    grid = ImageGrid(F, 133,  # similar to subplot(122)
                     nrows_ncols=(2, 2),
                     direction="row",
                     axes_pad = 0.1,
                     add_all=True,
                     label_mode = "1",
                     share_all = True,
                     cbar_location="top",
                     cbar_mode="each",
                     cbar_size="7%",
                     cbar_pad="2%",
                     )
    plt.ioff()
    for i in range(4):
        im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
        plt.colorbar(im, cax=grid.cbar_axes[i],
                     orientation="horizontal")
        grid.cbar_axes[i].xaxis.set_ticks_position("top")
        plt.setp(grid.cbar_axes[i].get_xticklabels(), visible=False)
    # This affects all axes as share_all = True.
    grid.axes_llc.set_xticks([-2, 0, 2])
    grid.axes_llc.set_yticks([-2, 0, 2])
    plt.ion()
    plt.draw()
| mit |
Delosari/dazer | bin/lib/Astro_Libraries/spectrum_fitting/import_functions.py | 1 | 34109 | import os
import sys
import numpy as np
import ConfigParser
from errno import ENOENT
from numpy import loadtxt
from pandas import read_excel, read_csv
from collections import OrderedDict
from scipy.interpolate import interp1d
from distutils.util import strtobool
from astropy.io import fits as astropyfits
# Function to create folders
def make_folder(folder_path):
    """Create *folder_path* (and any missing parents).

    Silently succeeds when the directory already exists; any other
    OSError from makedirs is re-raised.  (Python 2's os.makedirs has no
    ``exist_ok`` flag, hence the manual check.)
    """
    #TODO This one is only valid for 2.7
    #TODO add this one to your collection
    if os.path.isdir(folder_path):
        return
    try:
        os.makedirs(folder_path)
    except OSError:
        # A concurrent creation still counts as success.
        if not os.path.isdir(folder_path):
            raise
    return
# Function to delete files
def silent_remove(filename_list):
    """Delete every file in *filename_list*.

    Files that are already absent are ignored; any other OSError
    (permissions, path is a directory, ...) is re-raised.
    """
    for path in filename_list:
        try:
            os.remove(path)
        except OSError as err:
            # errno.ENOENT = no such file or directory
            if err.errno != ENOENT:
                raise
# Sample data for FIT3D compilation
def example_data(data_folder):
    """Return the canned FIT3D command-line arguments used as a fallback
    demo data set (see load_FIT3D_command_params).

    *data_folder* is accepted for interface compatibility but not used:
    the file names are relative to the caller's data folder.
    """
    # (name, value) pairs in the exact order expected on the command line.
    pairs = [
        ('script', 'auto_ssp_elines_rnd.py'),            # 0
        ('input_spec', 'NGC5947.spec_5.txt'),            # 1
        ('SSPs_lib', 'ssp_lib.fits,ssp_lib.fits'),       # 2
        ('output_file', 'auto_ssp.NGC5947.cen.only.out'),# 3
        ('mask_file', 'mask_elines.txt'),                # 4
        ('conf_file', 'auto_ssp_V500_several_Hb.config'),# 5
        ('plot_tag', 1),                                 # 6
        ('min', -1),                                     # 7
        ('max', 40),                                     # 8
        ('wmin', '3850'),                                # 9
        ('wmax', '6800'),                                # 10
        ('z_elines_mask', 'emission_lines.txt'),         # 11
        ('input_z', 0.02),                               # 12
        ('delta_z', 0.001),                              # 13
        ('min_z', 0.015),                                # 14
        ('max_z', 0.025),                                # 15
        ('input_sigma', 2.0),                            # 16
        ('delta_sigma', 0.5),                            # 17
        ('min_sigma', 1),                                # 18
        ('max_sigma', 9),                                # 19
        ('input_Av', 0.5),                               # 20
        ('delta_Av', 0.1),                               # 21
        ('min_Av', 0.0),                                 # 22
        ('max_Av', 1.6),                                 # 23
    ]
    return OrderedDict(pairs)
# Function to check for nan entries
def check_missing_flux_values(flux):
    """Warn (on stdout) when *flux* contains NaN entries.

    Parameters
    ----------
    flux : array-like of float
        Flux values to inspect.

    Returns
    -------
    None
    """
    # Evaluate the nan array
    nan_idcs = np.isnan(flux)
    nan_count = np.sum(nan_idcs)
    # Warn only when at least one entry is missing.
    if nan_count > 0:
        # BUGFIX: use a print() call instead of the Python-2-only print
        # statement so the function also works under Python 3.
        print('--WARNING: missing flux entries')
    return
# Function to import configuration data
def parseObjData(file_address, sectionName, objData):
    """Write the key/value pairs of *objData* into section *sectionName*
    of the configuration file at *file_address*.

    Existing file content is preserved; the section is created when
    missing.  List/array values are stored comma-joined, None becomes an
    empty string, everything else is stringified.
    """
    parser = ConfigParser.SafeConfigParser()
    # Keep option names case-sensitive.
    parser.optionxform = str
    if os.path.isfile(file_address):
        parser.read(file_address)
    if not parser.has_section(sectionName):
        parser.add_section(sectionName)
    for key, value in objData.items():
        if value is None:
            text = ''
        elif isinstance(value, (list, np.ndarray)):
            text = ','.join(str(item) for item in value)
        else:
            text = str(value)
        parser.set(sectionName, key, text)
    with open(file_address, 'w') as f:
        parser.write(f)
    return
# Function to save data to configuration file section
def parseDataFile(file_address, section, data, type_data=None, key_suffix=''):
    """Save the *data* mapping into *section* of an existing
    configuration file.

    Parameters
    ----------
    file_address : str
        Path of the configuration file; the program exits if it is missing.
    section : str
        Section to write into (created if absent).
    data : dict
        Key -> value pairs to store.
    type_data : str, optional
        When 'lists', each value is converted to a comma-joined string.
    key_suffix : str, optional
        Suffix appended to every stored key name.
    """
    # Local import keeps the function usable on both Python 2 and 3
    # regardless of the module-level (Python 2 only) import.
    try:
        import ConfigParser
    except ImportError:
        import configparser as ConfigParser
    # Check if file exists
    if os.path.isfile(file_address):
        cfg = ConfigParser.ConfigParser()
        cfg.optionxform = str
        cfg.read(file_address)
    else:
        exit('--WARNING: Default configuration could not be found exiting program\n-Missing file: {}'.format(file_address))
    # Check section is in conf.ini else create it
    if not cfg.has_section(section):
        cfg.add_section(section)
    # Change format to save data in dictionary
    for key in data:
        value = data[key]
        if type_data is not None:
            # TODO add a protocol to infer best format to save data
            # BUGFIX: compare strings with '==' instead of 'is' --
            # identity comparison with a literal relies on interning
            # and raises SyntaxWarning on modern Python.
            if type_data == 'lists':
                value = list(value)
                value = ','.join(str(x) for x in value)
        cfg.set(section, key + key_suffix, value)
    with open(file_address, 'w') as f:
        cfg.write(f)
    return
# Class with tools to import SSPs libraries
class SspSynthesisImporter:
    def __init__(self):
        """Define the FIT3D command-line argument layout, the
        configuration-file row structure and the Monte Carlo fitting
        constants used by the loader methods."""
        # ------------Configuration of Fit3D
        # Positional command-line arguments, in order (see load_FIT3D_command_params).
        self.sspSyn_commands_params = [
            'script',  # 0 python script name
            'input_spec',  # 1 input galactic spectrum name
            'SSPs_lib',  # 2 fits-table to use with python
            'output_file',  # 3 Reference name for output files
            'mask_file',  # 4 File with the spectrum region masks
            'conf_file',  # 5 Configuration file for the masks
            'plot_tag',  # 6 tag to launch the plotting
            'min',  # 7 Min flux for ploting
            'max',  # 8 Max flux for ploting
            'wmin',  # 9 Minimum wavelength for plotting
            'wmax',  # 10 Maximum wavelength for plotting
            'z_elines_mask',  # 11 Emission lines file
            'input_z',  # 12 Input redshift
            'delta_z',  # 13 Increments for redshift
            'min_z',  # 14 Minimum redshift
            'max_z',  # 15 Maximum redshift
            'input_sigma',  # 16 Input velocity dispersion
            'delta_sigma',  # 17 Increments for velocity dispersion
            'min_sigma',  # 18 Minimum velocity dispersion
            'max_sigma',  # 19 Maximum velocity dispersion
            'input_Av',  # 20 Input reddening
            'delta_Av',  # 21 Increments for reddening
            'min_Av',  # 22 Minimum reddening
            'max_Av',  # 23 Maximum reddening
        ]
        # The first 4 lines in the configuration file describe the input
        # (one sub-list of field names per configuration-file row).
        self.sspSyn_config_params = [['input_z', 'delta_z', 'min_z', 'max_z', 'DV', 'RV', 'DS', 'RS', 'MIN_W', 'MAX_W'],
                                     # 12-16
                                     ['input_sigma', 'delta_sigma', 'min_sigma', 'max_sigma'],
                                     # 17-20
                                     ['input_Av', 'delta_Av', 'min_Av', 'max_Av'],
                                     # 21-24
                                     ['N_Systems'],  # Number of SSP bases
                                     ['START_W', 'END_W', 'MASK_FILE', 'CONFIG_FILE', 'NPOLY', 'MASK_FILE_POLY',
                                      'N_MIN_E', 'N_MAX_E'],  # Bases config
                                     ['MIN_DELTA_CHISQ', 'MAX_NITER', 'CUT_MEDIAN_FLUX'],
                                     ['start_w_peak', 'end_w_peak'],
                                     ['wavelength_to_norm', 'width_AA', 'new_back_templates.fits']]
        # Bases float indeces
        # (columns of each base row that must be cast to float).
        self.idcs_floats = np.array([0, 1, 4, 6, 7])
        # Emision lines mask columns headers
        self.eline_mask_header = ['start_wave', 'end_wave', 'mask_file', 'mask_config_file', 'n_poly', 'mask_file_poly',
                                  'n_min_e', 'n_max_e']
        # Number of montercarlo iterations
        self.n_mc = 30
        # Initial value for the chiSq_min
        self.chiSq_min = 1e12
        return
    def load_FIT3D_data(self, conf_file, data_folder=None):
        """Assemble the full FIT3D fitting configuration.

        Merges the command-line arguments (which take precedence) with
        the configuration file *conf_file*, loads the observational
        spectrum, derives the output file names and removes stale
        outputs from previous runs.  Returns the configuration dict.
        """
        # Check if we are executing from the folder file
        data_folder = os.getcwd() + '/' if data_folder is None else data_folder
        # Read parameters from command line
        command_dict = self.load_FIT3D_command_params(data_folder=data_folder)
        config_dict = self.load_FIT3D_config_file(conf_file)
        # Update the fit configuration giving preference to the values from the command line
        config_dict.update(command_dict)
        # Load observational data and masks
        config_dict = self.load_FIT3D_observational_fits(data_folder, config_dict)
        # Prepare output files
        # (all share the output_file root, with a per-product code suffix).
        output_root = config_dict['output_file'][:config_dict['output_file'].rfind('.')]
        config_dict['single_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
                                                                                 file_code='single', ext='txt')
        config_dict['coeffs_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
                                                                                 file_code='coeffs', ext='txt')
        config_dict['spectrum_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
                                                                                   file_code='spec', ext='txt')
        config_dict['em_lines_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
                                                                                   file_code='elines', ext='txt')
        # Delete these output files if they had been generated from a previos run #USEFULL_Function
        silent_remove([config_dict['output_file'], config_dict['single_output_file'], config_dict['coeffs_output_file'],
                       config_dict['spectrum_output_file'], config_dict['em_lines_output_file']])
        # Store folder with the data and configuration folder
        config_dict['data_folder'] = data_folder
        config_dict['conf_file'] = conf_file
        config_dict['data_type'] = 'FIT3D'
        return config_dict
def load_FIT3D_command_params(self, data_folder):
# Empty dictionary to store the data from the commands from the command line
command_dict = OrderedDict()
# Extract line command arguments
self.args_list = sys.argv
# Check if the minimum parameters have been introduced (WARNING: Need to convert these to the right units)
if len(self.args_list) > 7:
command_dict = OrderedDict(zip(self.sspSyn_commands_params[:len(self.args_list)], self.args_list))
else:
print '--Error: The input command must include all these arguments:'
print ', '.join(self.sspSyn_commands_params[:7])
# Currently run test example if not enought data is provided
print '---Using example data'
command_dict = example_data(data_folder=data_folder)
return command_dict
    def load_FIT3D_config_file(self, config_file_address):
        """Parse a FIT3D plain-text configuration file.

        The file layout is positional (see self.sspSyn_config_params):
        three parameter rows, a base count, one row per base/mask, a
        chi-square row, a peak-wavelength row and an optional
        normalization row.  Returns a dict of parsed values.
        """
        # Empty dictionary to store the data from the config file
        fit_conf_dict = {}
        # Read the configuration text file
        with open(config_file_address) as conf_file:
            conf_lines = conf_file.readlines()
        # Read redshift, sigma and Av params rows
        for i in range(3):
            param_values = np.array(conf_lines[i].split(), dtype=float)
            fit_conf_dict.update(zip(self.sspSyn_config_params[i], param_values))
        # Read masks rows: 'START_W_n','END_W_n','MASK_FILE_n' ...
        nLineMasks = int(conf_lines[3])
        fit_conf_dict['nLineMasks'] = int(conf_lines[3])
        for i in range(4, 4 + fit_conf_dict['nLineMasks']):
            bases_key = 'base_{}'.format(i - 4)
            param_values = np.array(conf_lines[i].split())
            # Convert to float numerical entries
            # NOTE(review): indices 0,1,4,6,7 match self.idcs_floats, but the
            # values stay strings inside a string array -- confirm downstream
            # consumers re-cast them.
            param_values[0] = float(param_values[0])
            param_values[1] = float(param_values[1])
            param_values[4] = float(param_values[4])
            param_values[6] = float(param_values[6])
            param_values[7] = float(param_values[7])
            fit_conf_dict[bases_key] = param_values
        # Add ChiSq row (converting to float)
        param_values = np.array(conf_lines[4 + nLineMasks].split(), dtype=float)
        fit_conf_dict.update(zip(self.sspSyn_config_params[5], param_values))
        # Add peak wavelength row (converting to float)
        param_values = np.array(conf_lines[5 + nLineMasks].split(), dtype=float)
        fit_conf_dict.update(zip(self.sspSyn_config_params[6], param_values))
        # Normalizing row (if available) (converting to float)
        if len(conf_lines) == 7 + nLineMasks:
            param_values = np.array(conf_lines[6 + nLineMasks].split(), dtype=float)
            fit_conf_dict.update(zip(self.sspSyn_config_params[7], param_values))
        else:
            # NOTE(review): these default keys ('wave_norm', ...) differ from
            # the names in sspSyn_config_params[7] used above -- confirm which
            # names the fitting code actually reads.
            fit_conf_dict['wave_norm'] = None
            fit_conf_dict['w_wave_norm'] = None
            fit_conf_dict['new_back_file'] = None
        return fit_conf_dict
def load_FIT3D_mask(self, config_dict, obs_flux_resam):
    """Build a boolean mask of valid spectrum pixels.

    A pixel is kept when it simultaneously (1) has non-zero flux,
    (2) falls outside every spectrum-mask window, (3) falls outside every
    emission-line window (+/- 4 sigma around each redshifted line) and
    (4) lies inside the (wmin, wmax) wavelength limits.

    Fix: removed leftover Python-2-only debug ``print`` statements that
    dumped the intermediate mask shapes on every call (and broke Python 3).
    """
    obs_wave = config_dict['obs_wave']
    # Load the spectrum-mask windows (one xmin/xmax pair per row)
    mask_xmin, mask_xmax = loadtxt(config_dict['data_folder'] + config_dict['mask_file'], unpack=True)
    # Load emission-line rest wavelengths and build exclusion windows of
    # +/- 4 sigma around each line, shifted to the observed frame
    emLine_wave = loadtxt(config_dict['data_folder'] + config_dict['z_elines_mask'], usecols=([0]), unpack=True)
    emLine_mask_xmin = emLine_wave * (1 + config_dict['input_z']) - 4.0 * config_dict['input_sigma']
    emLine_mask_xmax = emLine_wave * (1 + config_dict['input_z']) + 4.0 * config_dict['input_sigma']
    # First check non-zero flux entries
    idx_mask_zero = (obs_flux_resam != 0)
    # Exclude pixels inside any spectrum-mask window (strict inequalities)
    idx_spec_mask = np.ones(len(obs_wave), dtype=bool)
    for i in range(len(mask_xmin)):
        idx_spec_mask &= ~((obs_wave > mask_xmin[i]) & (obs_wave < mask_xmax[i]))
    # Exclude pixels inside any emission-line window
    idx_emline_mask = np.ones(len(obs_wave), dtype=bool)
    for i in range(len(emLine_wave)):
        idx_emline_mask &= ~((obs_wave > emLine_mask_xmin[i]) & (obs_wave < emLine_mask_xmax[i]))
    # Recover wavelength limits for the masks; wmin/wmax are strings and may
    # contain a comma-separated pair, in which case only the first is used
    wmin_str, wmax_str = config_dict['wmin'].split(','), config_dict['wmax'].split(',')
    wmin = float(wmin_str[0]) if len(wmin_str) == 2 else float(config_dict['wmin'])
    wmax = float(wmax_str[0]) if len(wmax_str) == 2 else float(config_dict['wmax'])
    idx_mask_wmin, idx_mask_wmax = (obs_wave > wmin), (obs_wave < wmax)
    # Combine the individual criteria into a global mask
    total_masks = idx_mask_zero & idx_spec_mask & idx_emline_mask & idx_mask_wmin & idx_mask_wmax
    return total_masks
def load_FIT3D_observational_fits(self, data_folder, config_dict):
    """Load the observed spectrum table and derive/clip its flux errors.

    The input text table is read with columns (index, wave, flux[, var]).
    Results are stored back into config_dict ('obs_wave', 'obs_flux',
    'obs_flux_err', 'obs_fluxErrAdj', 'nObsPix') and the dict is returned.

    Fix: the original tested ``obs_fluxVar is not None`` after
    unconditionally slicing ``obs_data[:, 3]`` -- that slice is always an
    array (never None), so the fallback branch was dead and a 3-column
    table raised IndexError instead. The variance column is now optional.
    """
    # --------------Read observational data
    obs_data = loadtxt(data_folder + config_dict['input_spec'])
    obs_wave = obs_data[:, 1]
    obs_flux = obs_data[:, 2]
    # Variance column is optional
    obs_fluxVar = obs_data[:, 3] if obs_data.shape[1] > 3 else None
    # Issues with spectra: nan entries
    check_missing_flux_values(obs_flux)
    # Get the error from the variance column when available
    if obs_fluxVar is not None:
        obs_flux_err = np.sqrt(abs(obs_fluxVar))
    # Else estimate it from the spectrum itself
    else:
        obs_flux_err = np.sqrt(abs(obs_flux) / 10)
    # Clip implausibly large errors at 1.5x the median error
    median_err = np.median(obs_flux_err)
    idx_big_err = (obs_flux_err > 1.5 * median_err)
    obs_fluxErrAdj = np.copy(obs_flux_err)
    obs_fluxErrAdj[idx_big_err] = 1.5 * median_err
    # --------------Store data
    config_dict['obs_wave'] = obs_wave
    config_dict['obs_flux'] = obs_flux
    config_dict['obs_flux_err'] = obs_flux_err
    config_dict['obs_fluxErrAdj'] = obs_fluxErrAdj
    config_dict['nObsPix'] = len(obs_flux)
    return config_dict
def import_Fit3D_ssplibrary(self, ssp_file_address):
    """Load a FIT3D SSP bases library from a FITS file.

    Flux rows come from HDU 0; the wavelength grid is rebuilt from the
    CRPIX1/CDELT1/CRVAL1 header keywords, and each base's age and
    metallicity are parsed out of the NAME<i> header entries.
    """
    # Dictionary to store the data
    ssp_lib_dict = {}
    fluxBases, hdrBases = astropyfits.getdata(ssp_file_address, 0, header=True)
    fluxBases = np.asfortranarray(fluxBases)
    nBases, nPixelsBases = fluxBases.shape
    crpix, cdelt, crval = hdrBases['CRPIX1'], hdrBases['CDELT1'], hdrBases['CRVAL1']
    pixArray = np.arange(0, nPixelsBases)  # WARNING should this arange start at one?
    basesWavelength = (crval + cdelt * (pixArray + 1 - crpix))
    # Extract age and metallicity from the bases names
    Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
    for i in range(nBases):
        header_code = 'NAME{}'.format(i)
        # Read metallicity and age from the headers list
        base_keyname = hdrBases[header_code]
        # NOTE(review): assumes names shaped like
        # '<9 chars><age><Myr|Gyr>_z<metals>.<ext>' -- the age substring
        # starts at character 9; confirm against the library files.
        age_str = base_keyname[9:base_keyname.find('_z')]
        metal_str = base_keyname[base_keyname.find('_z') + 2:base_keyname.rfind('.')]
        age_factor = 1000.0 if 'Myr' in age_str else 1
        age_vector[i] = float(age_str[:-3]) / age_factor
        Z_vector[i] = float('0.' + metal_str)
    # Store library data in a dictionary
    ssp_lib_dict['crpix_bases'] = crpix
    ssp_lib_dict['cdelt_bases'] = cdelt
    ssp_lib_dict['crval_bases'] = crval
    ssp_lib_dict['basesWave'] = basesWavelength
    ssp_lib_dict['nBases'] = nBases
    ssp_lib_dict['nPixBases_max'] = nPixelsBases
    ssp_lib_dict['fluxBases'] = fluxBases
    ssp_lib_dict['hdrBases'] = hdrBases
    ssp_lib_dict['ageBases'] = age_vector
    ssp_lib_dict['zBases'] = Z_vector
    # ssp_lib_dict['bases_one_array'] = ones(nBases)
    return ssp_lib_dict
def import_STARLIGHT_ssplibrary(self, bases_folder, libraries_file_list):
    """Load a STARLIGHT bases library.

    bases_folder: folder containing the individual base spectra files.
    libraries_file_list: whitespace-delimited index file (one header row,
        then one row per base: file name, age, metallicity, ...).

    Fix: ``max_nPixelsBases`` was declared ("Initial pass to check the
    biggest size") but never updated, so 'nPixBases_max' was always stored
    as 0; it is now the length of the longest base spectrum.
    """
    print('\n--Importing STARLIGHT library')
    print('---Bases file: {}'.format(libraries_file_list))
    print('---Bases folder: {}'.format(bases_folder))
    # Dictionary to store the data
    ssp_lib_dict = {}
    columns_names = ['file_name', 'age_yr', 'z_star', 'bases_nickname', 'f_star', 'YAV_flag', 'alpha/Fe']
    bases_df = read_csv(libraries_file_list, delim_whitespace=True, names=columns_names, skiprows=1)
    nBases = len(bases_df.index)
    max_nPixelsBases = 0
    # Bases may cover different wavelength ranges/lengths, so keep them as
    # lists of arrays rather than a single 2D matrix
    waveBases_orig = []
    fluxBases_orig = []
    Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
    for i in range(nBases):
        bases_file = bases_folder + bases_df.iloc[i]['file_name']
        wave_base_i, flux_base_i = loadtxt(bases_file, unpack=True)
        waveBases_orig.append(wave_base_i)
        fluxBases_orig.append(flux_base_i)
        # Track the longest base (bug fix: this was never computed before)
        max_nPixelsBases = max(max_nPixelsBases, len(wave_base_i))
        age_vector[i] = bases_df.iloc[i]['age_yr']
        Z_vector[i] = bases_df.iloc[i]['z_star']
    ssp_lib_dict['basesWave'] = waveBases_orig
    ssp_lib_dict['nBases'] = nBases
    ssp_lib_dict['nPixBases_max'] = max_nPixelsBases
    ssp_lib_dict['fluxBases'] = fluxBases_orig
    ssp_lib_dict['ageBases'] = age_vector
    ssp_lib_dict['zBases'] = Z_vector
    # ssp_lib_dict['bases_one_array'] = ones(nBases)
    print('--Library imported')
    return ssp_lib_dict
# Class with SpecSyzer data-loading tools
class ImportModelData(SspSynthesisImporter):
    """Loads the framework configuration, default folders and lines database.

    Inherits the SSP-library import helpers from SspSynthesisImporter.
    """

    def __init__(self, confFolder):
        # Class with tools to import starlight bases
        SspSynthesisImporter.__init__(self)
        # Load default configuration file
        self.config = self.load_confFile(confFolder, 'config.ini')
        # Define default folders
        self.dataFolder = os.path.join(os.path.expanduser('~'), self.config['inference_folder'])
        self.inputsFolder = os.path.join(self.dataFolder, self.config['input_data_folder'])
        self.outputsFolder = os.path.join(self.dataFolder, self.config['output_data_folder'])
        self.externalDataFolder = os.path.join(confFolder, self.config['external_data_folder'])  # TODO this declaration is not universal with operative system try pathlib
        # NOTE(review): linesFormatDf gets the same path as
        # externalDataFolder -- looks like a copy/paste slip; confirm.
        self.linesFormatDf = os.path.join(confFolder, self.config['external_data_folder'])
        self.configFolder = os.path.join(confFolder, 'config.ini')
        self.linesDb = read_excel(os.path.join(self.externalDataFolder, self.config['linesData_file']), sheet_name=0, header=0, index_col=0)
def load_confFile(self, root_folder, confFile):
    """Read an ini-style configuration file into one flat dictionary.

    The 'conf_entries' section (which must be first) declares type hints
    ('string_conf', 'list_conf'); every option of every other section is
    coerced by name: strings, booleans ('_check'), comma-separated arrays,
    or floats by default.

    Fixes: identity comparisons with string literals
    (``option is 'input_lines'`` / ``raw_list is 'all'``) only worked via
    CPython interning and are now equality tests; the bare ``except:`` now
    catches only ValueError; ``np.array(map(...))`` (which yields a 0-d
    object array on Python 3) is now a list comprehension.
    """
    # Configuration file address
    file_address = '{}/{}'.format(root_folder, confFile)
    # Check if file exists
    if os.path.isfile(file_address):
        cfg = ConfigParser.ConfigParser()
        cfg.optionxform = str  # preserve option-name case
        cfg.read(file_address)
    else:
        exit('--WARNING: Default configuration could not be found exiting program')
    # Loop through configuration file sections and merge into a dictionary
    confDict = dict(cfg.items('conf_entries'))
    confDict['sections'] = cfg.sections()
    for i in range(1, len(cfg.sections())):
        section = cfg.sections()[i]
        confDict[section] = cfg.options(section)
        for option in cfg.options(section):
            if (option in confDict['string_conf']) or ('_folder' in option) or ('_file' in option):
                confDict[option] = cfg.get(section, option)
            elif '_check' in option:
                confDict[option] = cfg.getboolean(section, option)
            elif (option in confDict['list_conf']) or ('_parameters' in option) or ('_prior' in option) or ('_list' in option) or ('_coeffs' in option):
                raw_list = cfg.get(section, option)
                # Special entry: 'input_lines' may be the literal keyword 'all'
                if option == 'input_lines':
                    if raw_list == 'all':
                        confDict[option] = raw_list
                    else:
                        confDict[option] = np.array([str(item) for item in raw_list.split(',')])
                # By default try to read as a list of floats else strings
                else:
                    try:
                        confDict[option] = np.array([float(item) for item in raw_list.split(',')])
                    except ValueError:
                        confDict[option] = np.array([str(item) for item in raw_list.split(',')])
            # By default read as a float
            else:
                confDict[option] = cfg.getfloat(section, option)
    # Include configuration file address in the dictionary
    confDict['confAddress'] = file_address
    return confDict
def load_obsData(self, obsFile=None, objName=None):
    """Load an observation's properties from an ini-style file.

    Reads the section *objName* (guessed from the file when omitted),
    merges an optional '<objName>_results' section from previous fits,
    coerces each entry to its expected type (array / boolean / float /
    string) and finally loads the observed spectrum.
    """
    # TODO this should go into the master configuration
    list_parameters = ['input_lines', 'Av_prefit','sigma_star_prefit', 'coeffsPop_prefit', 'coeffsPopErr_prefit', 'wavelengh_limits', 'norm_interval'] #also all 'param_prior'
    boolean_parameters = ['Normalized_by_Hbeta']
    string_parameters = ['address_lines_log', 'address_spectrum', 'address_obs_mask', 'obsFile', 'objName']
    # ----Load the obj data
    if obsFile is not None:
        cfg = ConfigParser.SafeConfigParser()
        cfg.optionxform = str
        cfg.read(obsFile)
        # If no section is provided we assume the file only has one and it gives us the properties of the observation
        # NOTE(review): cfg.options(...) returns the OPTION list of the first
        # section, not its name -- cfg.sections()[0] looks like the intended
        # value; confirm.
        if objName is None:
            objName = cfg.options(cfg.sections()[0])
        # Dictionary with the observation data
        obj_data = dict(cfg.items(objName))
        obj_data['obsFile'] = obsFile
        obj_data['objName'] = objName
        # Recover data from previous fits
        results_section = objName + '_results'
        if cfg.has_section(results_section):
            prefit_data = dict(cfg.items(results_section))
            obj_data.update(prefit_data)
    else:
        # Dictionary with the observation data # TODO This does not work so well
        # NOTE(review): locals() here captures this function's own local
        # variables (obsFile, objName, the *_parameters lists...), not
        # observation data -- confirm this fallback is ever exercised.
        obj_data = locals()
    # Convert to the right format # TODO Add security warnings for wrong data
    for key in obj_data.keys():
        # Empty variable
        if obj_data[key] == '':
            obj_data[key] = None
        # None variable
        elif obj_data[key] is None:
            obj_data[key] = None
        # Arrays (The last boolean overrides the parameters
        elif ',' in obj_data[key]:
            if (key in list_parameters) or ('_prior' in key) or ('_true' in key) or (',' in obj_data[key]):
                if key in ['input_lines']:
                    if obj_data[key] == 'all':
                        obj_data[key] = 'all'
                    else:
                        obj_data[key] = np.array(map(str, obj_data[key].split(',')))
                else:
                    # Comma-separated floats; the literal token 'None'
                    # becomes NaN
                    newArray = []
                    textArrays = obj_data[key].split(',')
                    for item in textArrays:
                        convertValue = float(item) if item != 'None' else np.nan
                        newArray.append(convertValue)
                    obj_data[key] = np.array(newArray)
        # Boolean
        elif (key in boolean_parameters) or ('_check' in key):
            obj_data[key] = strtobool(obj_data[key]) == 1
        # Remaining are either strings (rest floats)
        elif key not in string_parameters:
            obj_data[key] = float(obj_data[key])
        # #Unrecognize object function
        # else:
        #     print 'WARNING: Parameter {} in {} not recognize. Exiting code'.format(key, obsFile)
        #     exit()
    # ----Load the obj spectrum, #TODO read this one using pandas and that way you can chek if there is a third column for the error
    obj_data['obs_wavelength'], obj_data['obs_flux'] = loadtxt(obj_data['address_spectrum'], usecols=(0, 1), unpack=True)
    # ----Load obj lines log # TODO update code to use address_lines_log
    obj_data['obj_lines_file'] = obj_data['address_lines_log']
    return obj_data
def import_optical_depth_coeff_table(self, file_address):
    """Read a whitespace-delimited table of optical-depth coefficients.

    Returns a dict mapping each column name to its numpy value array.

    Fix: removed an unused ``Data_dict = OrderedDict()`` local (leftover
    from a previous version; it was never read or returned).
    """
    opticalDepthCoeffs_df = read_csv(file_address, delim_whitespace=True, header=0)
    opticalDepthCoeffs = {}
    for column in opticalDepthCoeffs_df.columns:
        opticalDepthCoeffs[column] = opticalDepthCoeffs_df[column].values
    return opticalDepthCoeffs
def load_ssp_library(self, ssp_lib_type, data_folder=None, data_file=None, wavelengh_limits=None, resample_inc=None, norm_interval=None):
    """Import an SSP library of the given type and optionally treat it.

    ssp_lib_type: 'FIT3D' or 'starlight'; any other value yields a
    dictionary holding only 'data_type'. When any of wavelengh_limits /
    resample_inc / norm_interval is given, the library is trimmed,
    resampled and/or normalized in place via treat_input_spectrum.
    """
    # TODO In here we need to add a test sample library
    # Fallback dictionary used when the library type is not recognized
    sspLib_dict = {'data_type': ssp_lib_type}

    if ssp_lib_type == 'FIT3D':
        # Two comma-separated files may be supplied; only the first (the
        # big one) is currently used
        if ',' in data_file:
            first_file, _second_file = data_file.split(',')
        else:
            first_file = data_file
        sspLib_dict = self.import_Fit3D_ssplibrary(data_folder + first_file)
    elif ssp_lib_type == 'starlight':
        sspLib_dict = self.import_STARLIGHT_ssplibrary(data_folder, data_file)

    # Tag the library with its origin
    sspLib_dict['data_type'] = ssp_lib_type

    # Trim, resample and normalize the ssp library when requested
    if wavelengh_limits or resample_inc or norm_interval:
        self.treat_input_spectrum(sspLib_dict, sspLib_dict['basesWave'], sspLib_dict['fluxBases'],
                                  wavelengh_limits, resample_inc, norm_interval)

    return sspLib_dict
def treat_input_spectrum(self, output_dict, spec_wave, spec_flux, wavelengh_limits=None, resample_inc=None, norm_interval=None):
    """Optionally trim, resample and normalize a spectrum (or SSP bases).

    Results are written into output_dict ('wave_resam', 'flux_resam',
    'flux_norm', 'normFlux_coeff'). spec_flux may be a list of per-base
    arrays (possibly on different wavelength grids) or a single 1D array.
    """
    # TODO we should remove the nBases requirement by some style which can just read the number of dimensions
    # Store input values
    output_dict['wavelengh_limits'] = wavelengh_limits
    output_dict['resample_inc'] = resample_inc
    output_dict['norm_interval'] = norm_interval
    # Special case using 0, -1 indexing: replace sentinels by limits taken
    # from the data itself
    if wavelengh_limits is not None:
        # NOTE(review): inputWaveLimits aliases the caller's
        # wavelengh_limits list, so the sentinel replacement below mutates
        # the argument -- confirm that is intended.
        if (wavelengh_limits[0] != 0) and (wavelengh_limits[0] != -1):
            inputWaveLimits = wavelengh_limits
        else:
            inputWaveLimits = wavelengh_limits
            if wavelengh_limits[0] == 0:
                inputWaveLimits[0] = int(np.ceil(spec_wave[0]) + 1)
            if wavelengh_limits[-1] == -1:
                inputWaveLimits[-1] = int(np.floor(spec_wave[-1]) - 1)
    # Resampling the spectra
    if resample_inc is not None:
        # NOTE(review): inputWaveLimits is undefined here when
        # wavelengh_limits is None -- confirm callers always pass both.
        wave_resam = np.arange(inputWaveLimits[0], inputWaveLimits[-1], resample_inc, dtype=float)
        # Loop throught the fluxes (In the case of the bases it is assumed they may have different wavelength ranges)
        if isinstance(spec_flux, list):
            flux_resam = np.empty((output_dict['nBases'], len(wave_resam)))
            for i in range(output_dict['nBases']):
                flux_resam[i, :] = interp1d(spec_wave[i], spec_flux[i], bounds_error=True)(wave_resam)
        # In case only one dimension
        elif spec_flux.ndim == 1:
            flux_resam = interp1d(spec_wave, spec_flux, bounds_error=True)(wave_resam)
        output_dict['wave_resam'] = wave_resam
        output_dict['flux_resam'] = flux_resam
    else:
        output_dict['wave_resam'] = spec_wave
        output_dict['flux_resam'] = spec_flux
    # Normalizing the spectra
    if norm_interval is not None:
        # Loop throught the fluxes (In the case of the bases it is assumed they may have different wavelength ranges)
        if isinstance(spec_flux, list):
            # NOTE(review): wave_resam is undefined in this branch when
            # resample_inc is None (NameError) -- confirm the combination
            # norm_interval-without-resample_inc never happens for bases.
            normFlux_coeff = np.empty(output_dict['nBases'])
            flux_norm = np.empty((output_dict['nBases'], len(wave_resam)))
            for i in range(output_dict['nBases']):
                idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave[i], norm_interval)
                normFlux_coeff[i] = np.mean(spec_flux[i][idx_Wavenorm_min:idx_Wavenorm_max])
                flux_norm[i] = output_dict['flux_resam'][i] / normFlux_coeff[i]
        elif spec_flux.ndim == 1:
            idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave, norm_interval)
            normFlux_coeff = np.mean(spec_flux[idx_Wavenorm_min:idx_Wavenorm_max])
            flux_norm = output_dict['flux_resam'] / normFlux_coeff
        output_dict['flux_norm'] = flux_norm
        output_dict['normFlux_coeff'] = normFlux_coeff
    else:
        output_dict['flux_norm'] = output_dict['flux_resam']
        output_dict['normFlux_coeff'] = 1.0
    return
def generate_object_mask(self, linesDf, wavelength, linelabels):
    """Build the emission-line and spectrum masks for an object.

    Sets, as instance attributes:
      boolean_matrix: (n_lines, n_pix) bool, True inside each fitted
          line's w3-w4 window
      lineRes: per-line wavelength step measured at the line position
      int_mask: True on pixels outside every line and object-mask window
      object_mask: True on pixels outside the object-mask windows only
    """
    # TODO This will not work for a redshifted lines log
    idcs_lineMasks = linesDf.index.isin(linelabels)
    idcs_spectrumMasks = ~linesDf.index.isin(linelabels)
    # Matrix mask for integrating the emission lines
    n_lineMasks = idcs_lineMasks.sum()
    self.boolean_matrix = np.zeros((n_lineMasks, wavelength.size), dtype=bool)
    # Array with line wavelength resolution which we fill with default value (This is because there are lines beyond the continuum range)
    self.lineRes = np.ones(n_lineMasks) * (wavelength[1] - wavelength[0])
    # Total mask for valid regions in the spectrum
    n_objMasks = idcs_spectrumMasks.sum()
    self.int_mask = np.ones(wavelength.size, dtype=bool)
    self.object_mask = np.ones(wavelength.size, dtype=bool)
    # Loop through the emission lines
    wmin, wmax = linesDf['w3'].loc[idcs_lineMasks].values, linesDf['w4'].loc[idcs_lineMasks].values
    idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
    for i in range(n_lineMasks):
        if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]):  # We need this for lines beyond continuum range #TODO propose better
            w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
            idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
            self.boolean_matrix[i, :] = idx_currentMask
            self.int_mask = self.int_mask & ~idx_currentMask
            # Local wavelength step at the line (may differ from the default
            # when the grid is not uniform)
            self.lineRes[i] = wavelength[idxMax[i]] - wavelength[idxMax[i] - 1]
    # Loop through the object masks
    wmin, wmax = linesDf['w3'].loc[idcs_spectrumMasks].values, linesDf['w4'].loc[idcs_spectrumMasks].values
    idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
    for i in range(n_objMasks):
        if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]):
            w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
            idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
            self.int_mask = self.int_mask & ~idx_currentMask
            self.object_mask = self.object_mask & ~idx_currentMask
    return
| mit |
yochow/autotest | new_tko/tko/graphing_utils.py | 1 | 32535 | import base64, os, tempfile, operator, pickle, datetime, django.db
import os.path, getpass
from math import sqrt
# When you import matplotlib, it tries to write some temp files for better
# performance, and it does that to the directory in MPLCONFIGDIR, or, if that
# doesn't exist, the home directory. Problem is, the home directory is not
# writable when running under Apache, and matplotlib's not smart enough to
# handle that. It does appear smart enough to handle the files going
# away after they are written, though.
# Point MPLCONFIGDIR at a per-user directory under the system temp dir.
temp_dir = os.path.join(tempfile.gettempdir(),
                        '.matplotlib-%s' % getpass.getuser())
if not os.path.exists(temp_dir):
    os.mkdir(temp_dir)
os.environ['MPLCONFIGDIR'] = temp_dir
import matplotlib
# Select the non-interactive Agg backend; must happen before any figure use.
matplotlib.use('Agg')
import matplotlib.figure, matplotlib.backends.backend_agg
import StringIO, colorsys, PIL.Image, PIL.ImageChops
from autotest_lib.frontend.afe import readonly_connection
from autotest_lib.frontend.afe.model_logic import ValidationError
from autotest_lib.frontend.afe.simplejson import encoder
from autotest_lib.client.common_lib import global_config
from new_tko.tko import models, tko_rpc_utils
# Figure geometry
_FIGURE_DPI = 100
_FIGURE_WIDTH_IN = 10
_FIGURE_BOTTOM_PADDING_IN = 2 # for x-axis labels

# Plot heights (inches)
_SINGLE_PLOT_HEIGHT = 6
_MULTIPLE_PLOT_HEIGHT_PER_PLOT = 4

# Marker and line styling
_MULTIPLE_PLOT_MARKER_TYPE = 'o'
_MULTIPLE_PLOT_MARKER_SIZE = 4
_SINGLE_PLOT_STYLE = 'bs-' # blue squares with lines connecting
_SINGLE_PLOT_ERROR_BAR_COLOR = 'r'

# Legend styling
_LEGEND_FONT_SIZE = 'xx-small'
_LEGEND_HANDLE_LENGTH = 0.03
_LEGEND_NUM_POINTS = 3
_LEGEND_MARKER_TYPE = 'o'

# X-tick label sizes for line and bar charts
_LINE_XTICK_LABELS_SIZE = 'x-small'
_BAR_XTICK_LABELS_SIZE = 8

# Shared JSON encoder for drilldown callback arguments
_json_encoder = encoder.JSONEncoder()
class NoDataError(Exception):
    """Raised when a graphing query returns an empty result set."""
def _colors(n):
"""\
Generator function for creating n colors. The return value is a tuple
representing the RGB of the color.
"""
for i in xrange(n):
yield colorsys.hsv_to_rgb(float(i) / n, 1.0, 1.0)
def _resort(kernel_labels, list_to_sort):
    """Return list_to_sort reordered by kernel-version order of kernel_labels.

    Each label is wrapped in a KernelString (which sorts by kernel version)
    and used as the sort key for the corresponding element.
    """
    sort_keys = [tko_rpc_utils.KernelString(label) for label in kernel_labels]
    # Sort the (key, value) pairs and keep only the values; the kernel
    # strings themselves are not part of the result.
    return [value for _key, value in sorted(zip(sort_keys, list_to_sort))]
def _quote(string):
return "%s%s%s" % ("'", string.replace("'", r"\'"), "'")
# Page template: embeds the PNG inline as a base64 data URI and attaches an
# image map so regions of the graph are clickable.
_HTML_TEMPLATE = """\
<html><head></head><body>
<img src="data:image/png;base64,%s" usemap="#%s"
border="0" alt="graph">
<map name="%s">%s</map>
</body></html>"""

# One clickable rectangle of the image map: pixel coordinates, a hover
# tool-tip title, and a JS drilldown callback invoked with encoded arguments.
_AREA_TEMPLATE = """\
<area shape="rect" coords="%i,%i,%i,%i" title="%s"
href="#"
onclick="%s(%s); return false;">"""
class MetricsPlot(object):
    """Parameters describing a metrics plot request.

    query_dict: dictionary containing the main query and the drilldown
        queries. The main query returns a row for each x value: the first
        column is the x-axis label, subsequent columns hold data for each
        series (named by column name); a column named 'errors-<x>' is
        interpreted as errors for the series named <x>.
    plot_type: 'Line' or 'Bar', depending on the plot type the user wants.
    inverted_series: list of series that should be plotted on an inverted
        y-axis.
    normalize_to:
        None - do not normalize
        'first' - normalize against the first data point
        'x__%s' - normalize against the x-axis value %s
        'series__%s' - normalize against the series %s
    drilldown_callback: name of drilldown callback method.
    """

    def __init__(self, query_dict, plot_type, inverted_series, normalize_to,
                 drilldown_callback):
        self.query_dict = query_dict
        if plot_type not in ('Line', 'Bar'):
            raise ValidationError({'plot' : 'Plot must be either Line or Bar'})
        self.is_line = (plot_type == 'Line')
        self.plot_type = plot_type
        self.inverted_series = inverted_series
        # An empty string means "no normalization"
        self.normalize_to = '' if normalize_to is None else normalize_to
        self.drilldown_callback = drilldown_callback
class QualificationHistogram(object):
    """Parameters describing a machine-qualification histogram.

    query: the main pass-rate query. The first column holds the hostnames
        of all machines satisfying the global filter, the second ('total')
        the number of tests run on that machine under the filter, and the
        third ('good') how many of those passed.
    filter_string: filter applied on top of the common global filter to
        show the Table View drilldown of a histogram bucket.
    interval: bucket width, e.g. 10 for buckets 0-10%, 10-20%, ...
    drilldown_callback: name of the drilldown callback method.
    """

    def __init__(self, query, filter_string, interval, drilldown_callback):
        self.query = query
        self.filter_string = filter_string
        self.interval = interval
        self.drilldown_callback = drilldown_callback
def _create_figure(height_inches):
    """Create a matplotlib Figure of the given height in inches.

    Extra bottom padding is reserved for the rotated x-axis labels.
    Returns (figure, height_in_pixels).
    """
    total_height_in = height_inches + _FIGURE_BOTTOM_PADDING_IN
    figure = matplotlib.figure.Figure(
        figsize=(_FIGURE_WIDTH_IN, total_height_in),
        dpi=_FIGURE_DPI, facecolor='white')
    # Reserve the padding fraction at the bottom of the plotting area
    figure.subplots_adjust(
        bottom=float(_FIGURE_BOTTOM_PADDING_IN) / height_inches)
    height_px = figure.get_figheight() * _FIGURE_DPI
    return (figure, height_px)
def _create_line(plots, labels, plot_info):
    """\
    Given all the data for the metrics, create a line plot.

    plots: list of dicts containing the plot data. Each dict contains:
            x: list of x-values for the plot
            y: list of corresponding y-values
            errors: errors for each data point, or None if no error information
                    available
            label: plot title
    labels: list of x-tick labels
    plot_info: a MetricsPlot

    Returns (figure, area_data), where area_data describes the clickable
    drilldown regions of the rendered image.
    """
    # when we're doing any kind of normalization, all series get put into a
    # single plot
    single = bool(plot_info.normalize_to)

    area_data = []
    lines = []
    if single:
        plot_height = _SINGLE_PLOT_HEIGHT
    else:
        plot_height = _MULTIPLE_PLOT_HEIGHT_PER_PLOT * len(plots)
    figure, height = _create_figure(plot_height)

    if single:
        subplot = figure.add_subplot(1, 1, 1)

    # Plot all the data
    for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
        needs_invert = (plot['label'] in plot_info.inverted_series)

        # Add a new subplot, if user wants multiple subplots
        # Also handle axis inversion for subplots here
        if not single:
            subplot = figure.add_subplot(len(plots), 1, plot_index + 1)
            subplot.set_title(plot['label'])
            if needs_invert:
                # for separate plots, just invert the y-axis
                subplot.set_ylim(1, 0)
        elif needs_invert:
            # for a shared plot (normalized data), need to invert the y values
            # manually, since all plots share a y-axis
            plot['y'] = [-y for y in plot['y']]

        # Plot the series
        subplot.set_xticks(range(0, len(labels)))
        subplot.set_xlim(-1, len(labels))
        # NOTE(review): the "single" branch uses the _MULTIPLE_PLOT_* marker
        # constants and the multiple-subplot branch uses _SINGLE_PLOT_STYLE
        # -- the constant names look swapped relative to their use; confirm
        # which styling was intended before renaming anything.
        if single:
            lines += subplot.plot(plot['x'], plot['y'], label=plot['label'],
                                  marker=_MULTIPLE_PLOT_MARKER_TYPE,
                                  markersize=_MULTIPLE_PLOT_MARKER_SIZE)
            error_bar_color = lines[-1].get_color()
        else:
            lines += subplot.plot(plot['x'], plot['y'], _SINGLE_PLOT_STYLE,
                                  label=plot['label'])
            error_bar_color = _SINGLE_PLOT_ERROR_BAR_COLOR
        if plot['errors']:
            subplot.errorbar(plot['x'], plot['y'], linestyle='None',
                             yerr=plot['errors'], color=error_bar_color)
        subplot.set_xticklabels([])

    # Construct the information for the drilldowns.
    # We need to do this in a separate loop so that all the data is in
    # matplotlib before we start calling transform(); otherwise, it will return
    # incorrect data because it hasn't finished adjusting axis limits.
    for line in lines:

        # Get the pixel coordinates of each point on the figure
        x = line.get_xdata()
        y = line.get_ydata()
        label = line.get_label()
        icoords = line.get_transform().transform(zip(x,y))

        # Get the appropriate drilldown query
        drill = plot_info.query_dict['__' + label + '__']

        # Set the title attributes (hover-over tool-tips)
        x_labels = [labels[x_val] for x_val in x]
        titles = ['%s - %s: %f' % (label, x_label, y_val)
                  for x_label, y_val in zip(x_labels, y)]

        # Get the appropriate parameters for the drilldown query
        params = [dict(query=drill, series=line.get_label(), param=x_label)
                  for x_label in x_labels]

        # 10x10 px clickable square centered on each data point (y flipped
        # because image coordinates grow downward)
        area_data += [dict(left=ix - 5, top=height - iy - 5,
                           right=ix + 5, bottom=height - iy + 5,
                           title= title,
                           callback=plot_info.drilldown_callback,
                           callback_arguments=param_dict)
                      for (ix, iy), title, param_dict
                      in zip(icoords, titles, params)]

    subplot.set_xticklabels(labels, rotation=90, size=_LINE_XTICK_LABELS_SIZE)

    # Show the legend if there are not multiple subplots
    if single:
        font_properties = matplotlib.font_manager.FontProperties(
            size=_LEGEND_FONT_SIZE)
        legend = figure.legend(lines, [plot['label'] for plot in plots],
                               prop=font_properties,
                               handlelen=_LEGEND_HANDLE_LENGTH,
                               numpoints=_LEGEND_NUM_POINTS)
        # Workaround for matplotlib not keeping all line markers in the legend -
        # it seems if we don't do this, matplotlib won't keep all the line
        # markers in the legend.
        for line in legend.get_lines():
            line.set_marker(_LEGEND_MARKER_TYPE)

    return (figure, area_data)
def _get_adjusted_bar(x, bar_width, series_index, num_plots):
"""\
Adjust the list 'x' to take the multiple series into account. Each series
should be shifted such that the middle series lies at the appropriate x-axis
tick with the other bars around it. For example, if we had four series
(i.e. four bars per x value), we want to shift the left edges of the bars as
such:
Bar 1: -2 * width
Bar 2: -width
Bar 3: none
Bar 4: width
"""
adjust = (-0.5 * num_plots - 1 + series_index) * bar_width
return [x_val + adjust for x_val in x]
# TODO(showard): merge much of this function with _create_line by extracting and
# parameterizing methods
def _create_bar(plots, labels, plot_info):
    """\
    Given all the data for the metrics, create a bar plot.

    plots: list of dicts containing the plot data.
            x: list of x-values for the plot
            y: list of corresponding y-values
            errors: errors for each data point, or None if no error information
                    available
            label: plot title
    labels: list of x-tick labels
    plot_info: a MetricsPlot

    Returns (figure, area_data), where area_data describes the clickable
    drilldown regions of the rendered image.
    """
    area_data = []
    bars = []
    figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)

    # Set up the plot
    subplot = figure.add_subplot(1, 1, 1)
    subplot.set_xticks(range(0, len(labels)))
    subplot.set_xlim(-1, len(labels))
    subplot.set_xticklabels(labels, rotation=90, size=_BAR_XTICK_LABELS_SIZE)
    # draw a bold line at y=0, making it easier to tell if bars are dipping
    # below the axis or not.
    subplot.axhline(linewidth=2, color='black')

    # width here is the width for each bar in the plot. Matplotlib default is
    # 0.8.
    width = 0.8 / len(plots)

    # Plot the data
    for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
        # Invert the y-axis if needed
        if plot['label'] in plot_info.inverted_series:
            plot['y'] = [-y for y in plot['y']]

        adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
                                       len(plots))
        bar_data = subplot.bar(adjusted_x, plot['y'],
                               width=width, yerr=plot['errors'],
                               facecolor=color,
                               label=plot['label'])
        # Keep one patch per series for the legend
        bars.append(bar_data[0])

    # Construct the information for the drilldowns.
    # See comment in _create_line for why we need a separate loop to do this.
    for plot_index, plot in enumerate(plots):
        adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
                                       len(plots))

        # Let matplotlib plot the data, so that we can get the data-to-image
        # coordinate transforms
        line = subplot.plot(adjusted_x, plot['y'], linestyle='None')[0]
        label = plot['label']
        upper_left_coords = line.get_transform().transform(zip(adjusted_x,
                                                               plot['y']))
        bottom_right_coords = line.get_transform().transform(
            [(x + width, 0) for x in adjusted_x])

        # Get the drilldown query
        drill = plot_info.query_dict['__' + label + '__']

        # Set the title attributes (hover-over tool-tips)
        x_labels = [labels[x] for x in plot['x']]
        titles = ['%s - %s: %f' % (plot['label'], label, y)
                  for label, y in zip(x_labels, plot['y'])]
        params = [dict(query=drill, series=plot['label'], param=x_label)
                  for x_label in x_labels]
        # Clickable rectangle covering each bar (y flipped because image
        # coordinates grow downward)
        area_data += [dict(left=ulx, top=height - uly,
                           right=brx, bottom=height - bry,
                           title=title,
                           callback=plot_info.drilldown_callback,
                           callback_arguments=param_dict)
                      for (ulx, uly), (brx, bry), title, param_dict
                      in zip(upper_left_coords, bottom_right_coords, titles,
                             params)]

    figure.legend(bars, [plot['label'] for plot in plots])
    return (figure, area_data)
def _normalize(data_values, data_errors, base_values, base_errors):
"""\
Normalize the data against a baseline.
data_values: y-values for the to-be-normalized data
data_errors: standard deviations for the to-be-normalized data
base_values: list of values normalize against
base_errors: list of standard deviations for those base values
"""
values = []
for value, base in zip(data_values, base_values):
try:
values.append(100 * (value - base) / base)
except ZeroDivisionError:
# Base is 0.0 so just simplify:
# If value < base: append -100.0;
# If value == base: append 0.0 (obvious); and
# If value > base: append 100.0.
values.append(100 * float(cmp(value, base)))
# Based on error for f(x,y) = 100 * (x - y) / y
if data_errors:
if not base_errors:
base_errors = [0] * len(data_errors)
errors = []
for data, error, base_value, base_error in zip(
data_values, data_errors, base_values, base_errors):
try:
errors.append(sqrt(error**2 * (100 / base_value)**2
+ base_error**2 * (100 * data / base_value**2)**2
+ error * base_error * (100 / base_value**2)**2))
except ZeroDivisionError:
# Again, base is 0.0 so do the simple thing.
errors.append(100 * abs(error))
else:
errors = None
return (values, errors)
def _create_png(figure):
    """\
    Given the matplotlib figure, generate the PNG data for it.

    Returns (png_bytes, bounding_box), where bounding_box is the non-white
    region the image was cropped to; callers need it to shift image-map
    coordinates to match the cropped image.
    """
    # Draw the image
    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
    canvas.draw()
    size = canvas.get_renderer().get_canvas_width_height()
    image_as_string = canvas.tostring_rgb()
    image = PIL.Image.fromstring('RGB', size, image_as_string, 'raw', 'RGB', 0,
                                 1)
    # Solid background of the figure's face color, used to detect which
    # pixels are actual plot content
    image_background = PIL.Image.new(image.mode, image.size,
                                     figure.get_facecolor())

    # Crop the image to remove surrounding whitespace
    non_whitespace = PIL.ImageChops.difference(image, image_background)
    bounding_box = non_whitespace.getbbox()
    image = image.crop(bounding_box)

    image_data = StringIO.StringIO()
    image.save(image_data, format='PNG')

    return image_data.getvalue(), bounding_box
def _create_image_html(figure, area_data, plot_info):
    """\
    Given the figure and drilldown data, construct the HTML that will render the
    graph as a PNG image, and attach the image map to that image.

    figure: figure containing the drawn plot(s)
    area_data: list of parameters for each area of the image map. See the
               definition of the template string '_AREA_TEMPLATE'
    plot_info: a MetricsPlot or QualHistogram

    Returns the full HTML page as a string.

    Fix: the JSON callback arguments are embedded inside a double-quoted
    HTML attribute (see _AREA_TEMPLATE's onclick), so double quotes must
    be escaped as &quot;. The previous code replaced '"' with '"' -- a
    no-op that produced broken markup whenever the JSON contained strings.
    """
    png, bbox = _create_png(figure)

    # Construct the list of image map areas, shifting every coordinate by
    # the crop bounding box so they line up with the cropped image
    areas = [_AREA_TEMPLATE %
             (data['left'] - bbox[0], data['top'] - bbox[1],
              data['right'] - bbox[0], data['bottom'] - bbox[1],
              data['title'], data['callback'],
              _json_encoder.encode(data['callback_arguments'])
              .replace('"', '&quot;'))
             for data in area_data]

    map_name = plot_info.drilldown_callback + '_map'
    return _HTML_TEMPLATE % (base64.b64encode(png), map_name, map_name,
                             '\n'.join(areas))
def _find_plot_by_label(plots, label):
for index, plot in enumerate(plots):
if plot['label'] == label:
return index
raise ValueError('no plot labeled "%s" found' % label)
def _normalize_to_series(plots, base_series):
    """\
    Normalize every plot in `plots` against the series named `base_series`.

    The baseline plot is removed from `plots`; each remaining plot is
    restricted to x values that also appear in the baseline, then rescaled
    via _normalize().  Mutates `plots` and its plot dicts in place.
    Raises NoDataError if a plot shares no x values with the baseline.
    """
    base_series_index = _find_plot_by_label(plots, base_series)
    base_plot = plots[base_series_index]
    base_xs = base_plot['x']
    base_values = base_plot['y']
    base_errors = base_plot['errors']
    # The baseline itself is not drawn; drop it from the plot list.
    del plots[base_series_index]
    for plot in plots:
        old_xs, old_values, old_errors = plot['x'], plot['y'], plot['errors']
        new_xs, new_values, new_errors = [], [], []
        new_base_values, new_base_errors = [], []
        # Select only points in the to-be-normalized data that have a
        # corresponding baseline value
        for index, x_value in enumerate(old_xs):
            try:
                base_index = base_xs.index(x_value)
            except ValueError:
                # No baseline point at this x; skip it.
                continue
            new_xs.append(x_value)
            new_values.append(old_values[index])
            new_base_values.append(base_values[base_index])
            if old_errors:
                # NOTE(review): assumes the baseline also has errors when
                # this plot does — verify base_errors is non-None here.
                new_errors.append(old_errors[index])
                new_base_errors.append(base_errors[base_index])
        if not new_xs:
            raise NoDataError('No normalizable data for series ' +
                              plot['label'])
        plot['x'] = new_xs
        plot['y'] = new_values
        if old_errors:
            plot['errors'] = new_errors
        plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                               new_base_values,
                                               new_base_errors)
def _create_metrics_plot_helper(plot_info, extra_text=None):
    """
    Create a metrics plot of the given plot data.
    plot_info: a MetricsPlot object.
    extra_text: text to show at the upper-left of the graph
    Returns a (figure, area_data) tuple.
    TODO(showard): move some/all of this logic into methods on MetricsPlot
    """
    query = plot_info.query_dict['__main__']
    cursor = readonly_connection.connection().cursor()
    cursor.execute(query)
    if not cursor.rowcount:
        raise NoDataError('query did not return any data')
    rows = cursor.fetchall()
    # "transpose" rows, so columns[0] is all the values from the first column,
    # etc.
    columns = zip(*rows)
    plots = []
    labels = [str(label) for label in columns[0]]
    needs_resort = (cursor.description[0][0] == 'kernel')
    # Collect all the data for the plot.  A data column may be followed by an
    # 'errors-<label>' column carrying its error bars.
    col = 1
    while col < len(cursor.description):
        y = columns[col]
        label = cursor.description[col][0]
        col += 1
        if (col < len(cursor.description) and
            'errors-' + label == cursor.description[col][0]):
            errors = columns[col]
            col += 1
        else:
            errors = None
        if needs_resort:
            y = _resort(labels, y)
            if errors:
                errors = _resort(labels, errors)
        # Drop missing (None) points, remembering the original x positions.
        x = [index for index, value in enumerate(y) if value is not None]
        if not x:
            raise NoDataError('No data for series ' + label)
        y = [y[i] for i in x]
        if errors:
            errors = [errors[i] for i in x]
        plots.append({
            'label': label,
            'x': x,
            'y': y,
            'errors': errors
        })
    if needs_resort:
        labels = _resort(labels, labels)
    # Normalize the data if necessary
    normalize_to = plot_info.normalize_to
    if normalize_to == 'first' or normalize_to.startswith('x__'):
        if normalize_to != 'first':
            baseline = normalize_to[3:]
            try:
                baseline_index = labels.index(baseline)
            except ValueError:
                raise ValidationError({
                        'Normalize' : 'Invalid baseline %s' % baseline
                        })
        for plot in plots:
            if normalize_to == 'first':
                plot_index = 0
            else:
                try:
                    plot_index = plot['x'].index(baseline_index)
                # if the value is not found, then we cannot normalize
                except ValueError:
                    raise ValidationError({
                            'Normalize' : ('%s does not have a value for %s'
                                           % (plot['label'], normalize_to[3:]))
                            })
            base_values = [plot['y'][plot_index]] * len(plot['y'])
            # BUGFIX: base_errors must be defined before the _normalize()
            # call below.  Previously it was only assigned inside the 'if',
            # so a plot without errors hit a NameError (or silently reused a
            # stale value from the previous loop iteration).
            base_errors = None
            if plot['errors']:
                base_errors = [plot['errors'][plot_index]] * len(plot['errors'])
            plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                                   base_values,
                                                   base_errors)
    elif normalize_to.startswith('series__'):
        base_series = normalize_to[8:]
        _normalize_to_series(plots, base_series)
    # Call the appropriate function to draw the line or bar plot
    if plot_info.is_line:
        figure, area_data = _create_line(plots, labels, plot_info)
    else:
        figure, area_data = _create_bar(plots, labels, plot_info)
    # TODO(showard): extract these magic numbers to named constants
    if extra_text:
        text_y = .95 - .0075 * len(plots)
        figure.text(.1, text_y, extra_text, size='xx-small')
    return (figure, area_data)
def create_metrics_plot(query_dict, plot_type, inverted_series, normalize_to,
                        drilldown_callback, extra_text=None):
    """Build a metrics plot and return the HTML that renders it."""
    info = MetricsPlot(query_dict, plot_type, inverted_series, normalize_to,
                       drilldown_callback)
    figure, areas = _create_metrics_plot_helper(info, extra_text)
    return _create_image_html(figure, areas, info)
def _get_hostnames_in_bucket(hist_data, bucket):
"""\
Get all the hostnames that constitute a particular bucket in the histogram.
hist_data: list containing tuples of (hostname, pass_rate)
bucket: tuple containing the (low, high) values of the target bucket
"""
return [hostname for hostname, pass_rate in hist_data
if bucket[0] <= pass_rate < bucket[1]]
def _create_qual_histogram_helper(plot_info, extra_text=None):
    """\
    Create a machine qualification histogram of the given data.
    plot_info: a QualificationHistogram
    extra_text: text to show at the upper-left of the graph
    Returns a (figure, area_data) tuple.
    TODO(showard): move much or all of this into methods on
    QualificationHistogram
    """
    cursor = readonly_connection.connection().cursor()
    cursor.execute(plot_info.query)
    if not cursor.rowcount:
        raise NoDataError('query did not return any data')
    # Lists to store the plot data.
    # hist_data store tuples of (hostname, pass_rate) for machines that have
    # pass rates between 0 and 100%, exclusive.
    # no_tests is a list of machines that have run none of the selected tests
    # no_pass is a list of machines with 0% pass rate
    # perfect is a list of machines with a 100% pass rate
    hist_data = []
    no_tests = []
    no_pass = []
    perfect = []
    # Construct the lists of data to plot.  Each row is expected to be a
    # (hostname, total_tests, passed_tests) tuple.
    for hostname, total, good in cursor.fetchall():
        if total == 0:
            no_tests.append(hostname)
            continue
        if good == 0:
            no_pass.append(hostname)
        elif good == total:
            perfect.append(hostname)
        else:
            percentage = 100.0 * good / total
            hist_data.append((hostname, percentage))
    interval = plot_info.interval
    # Bucket edges; pad with one extra interval if 100 is not an edge.
    bins = range(0, 100, interval)
    if bins[-1] != 100:
        bins.append(bins[-1] + interval)
    figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
    subplot = figure.add_subplot(1, 1, 1)
    # Plot the data and get all the bars plotted
    _,_, bars = subplot.hist([data[1] for data in hist_data],
                             bins=bins, align='left')
    # Three "special" bars outside the normal bins: 0%, 100%, and N/A.
    bars += subplot.bar([-interval], len(no_pass),
                        width=interval, align='center')
    bars += subplot.bar([bins[-1]], len(perfect),
                        width=interval, align='center')
    bars += subplot.bar([-3 * interval], len(no_tests),
                        width=interval, align='center')
    buckets = [(bin, min(bin + interval, 100)) for bin in bins[:-1]]
    # set the x-axis range to cover all the normal bins plus the three "special"
    # ones - N/A (3 intervals left), 0% (1 interval left) ,and 100% (far right)
    subplot.set_xlim(-4 * interval, bins[-1] + interval)
    subplot.set_xticks([-3 * interval, -interval] + bins + [100 + interval])
    subplot.set_xticklabels(['N/A', '0%'] +
                            ['%d%% - <%d%%' % bucket for bucket in buckets] +
                            ['100%'], rotation=90, size='small')
    # Find the coordinates on the image for each bar
    x = []
    y = []
    for bar in bars:
        x.append(bar.get_x())
        y.append(bar.get_height())
    # Invisible line plot used only to obtain the data->pixel transform.
    f = subplot.plot(x, y, linestyle='None')[0]
    upper_left_coords = f.get_transform().transform(zip(x, y))
    bottom_right_coords = f.get_transform().transform(
        [(x_val + interval, 0) for x_val in x])
    # Set the title attributes
    titles = ['%d%% - <%d%%: %d machines' % (bucket[0], bucket[1], y_val)
              for bucket, y_val in zip(buckets, y)]
    titles.append('0%%: %d machines' % len(no_pass))
    titles.append('100%%: %d machines' % len(perfect))
    titles.append('N/A: %d machines' % len(no_tests))
    # Get the hostnames for each bucket in the histogram
    names_list = [_get_hostnames_in_bucket(hist_data, bucket)
                  for bucket in buckets]
    names_list += [no_pass, perfect]
    if plot_info.filter_string:
        plot_info.filter_string += ' AND '
    # Construct the list of drilldown parameters to be passed when the user
    # clicks on the bar.
    params = []
    for names in names_list:
        if names:
            hostnames = ','.join(_quote(hostname) for hostname in names)
            hostname_filter = 'hostname IN (%s)' % hostnames
            full_filter = plot_info.filter_string + hostname_filter
            params.append({'type': 'normal',
                           'filterString': full_filter})
        else:
            params.append({'type': 'empty'})
    # The N/A bar gets its own parameter type with the raw host list.
    params.append({'type': 'not_applicable',
                   'hosts': '<br />'.join(no_tests)})
    # Image-map y coordinates grow downward, so flip against figure height.
    area_data = [dict(left=ulx, top=height - uly,
                      right=brx, bottom=height - bry,
                      title=title, callback=plot_info.drilldown_callback,
                      callback_arguments=param_dict)
                 for (ulx, uly), (brx, bry), title, param_dict
                 in zip(upper_left_coords, bottom_right_coords, titles, params)]
    # TODO(showard): extract these magic numbers to named constants
    if extra_text:
        figure.text(.1, .95, extra_text, size='xx-small')
    return (figure, area_data)
def create_qual_histogram(query, filter_string, interval, drilldown_callback,
                          extra_text=None):
    """Build a machine qualification histogram and return its HTML."""
    info = QualificationHistogram(query, filter_string, interval,
                                  drilldown_callback)
    figure, areas = _create_qual_histogram_helper(info, extra_text)
    return _create_image_html(figure, areas, info)
def create_embedded_plot(model, update_time):
    """\
    Given an EmbeddedGraphingQuery object, generate the PNG image for it.
    model: EmbeddedGraphingQuery object
    update_time: 'Last updated' time
    """
    # NOTE: params were pickled by our own code at save time; pickle must
    # never be applied to untrusted data.
    params = pickle.loads(model.params)
    extra_text = 'Last updated: %s' % update_time
    graph_type = model.graph_type
    if graph_type == 'metrics':
        info = MetricsPlot(query_dict=params['queries'],
                           plot_type=params['plot'],
                           inverted_series=params['invert'],
                           normalize_to=None,
                           drilldown_callback='')
        figure = _create_metrics_plot_helper(info, extra_text)[0]
    elif graph_type == 'qual':
        info = QualificationHistogram(
            query=params['query'], filter_string=params['filter_string'],
            interval=params['interval'], drilldown_callback='')
        figure = _create_qual_histogram_helper(info, extra_text)[0]
    else:
        raise ValueError('Invalid graph_type %s' % graph_type)
    # Only the PNG bytes are needed; the bounding box is discarded.
    return _create_png(figure)[0]
# Minutes a thread may hold the cached-graph refresh lock before another
# thread is allowed to regenerate the image (see handle_plot_request).
_cache_timeout = global_config.global_config.get_config_value(
    'TKO', 'graph_cache_creation_timeout_minutes')
def handle_plot_request(id, max_age):
    """\
    Given the embedding id of a graph, generate a PNG of the embedded graph
    associated with that id.
    id: id of the embedded graph
    max_age: maximum age, in minutes, that a cached version should be held
    """
    model = models.EmbeddedGraphingQuery.objects.get(id=id)
    # Check if the cached image needs to be updated
    now = datetime.datetime.now()
    update_time = model.last_updated + datetime.timedelta(minutes=int(max_age))
    if now > update_time:
        cursor = django.db.connection.cursor()
        # We want this query to update the refresh_time only once, even if
        # multiple threads are running it at the same time. That is, only the
        # first thread will win the race, and it will be the one to update the
        # cached image; all other threads will show that they updated 0 rows
        query = """
        UPDATE embedded_graphing_queries
        SET refresh_time = NOW()
        WHERE id = %s AND (
            refresh_time IS NULL OR
            refresh_time + INTERVAL %s MINUTE < NOW()
        )
        """
        # _cache_timeout bounds how long a crashed winner can block refreshes.
        cursor.execute(query, (id, _cache_timeout))
        # Only refresh the cached image if we were successful in updating the
        # refresh time
        if cursor.rowcount:
            model.cached_png = create_embedded_plot(model, now.ctime())
            model.last_updated = now
            # Clearing refresh_time releases the "lock" for future refreshes.
            model.refresh_time = None
            model.save()
    return model.cached_png
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/io/formats/format.py | 1 | 56873 | """
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from functools import partial
from io import StringIO
import re
from shutil import get_terminal_size
from unicodedata import east_asian_width
import numpy as np
from pandas._config.config import get_option, set_option
from pandas._libs import lib
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
ABCSparseArray,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.index import Index, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.common import _expand_user, _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
header : bool, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
"""
_VALID_JUSTIFY_PARAMETERS = (
"left",
"right",
"center",
"justify",
"justify-all",
"start",
"end",
"inherit",
"match-parent",
"initial",
"unset",
)
return_docstring = """
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
"""
class CategoricalFormatter:
    """Render a Categorical as a string, including an optional footer with
    the length and the categories description."""

    def __init__(self, categorical, buf=None, length=True, na_rep="NaN", footer=True):
        self.categorical = categorical
        self.buf = buf if buf is not None else StringIO("")
        self.na_rep = na_rep
        self.length = length
        self.footer = footer

    def _get_footer(self):
        """Return the footer text: optional length plus categories info."""
        parts = []
        if self.length:
            parts.append("Length: {length}".format(length=len(self.categorical)))
        footer = ", ".join(parts)
        # The categories description always goes last, on its own line.
        level_info = self.categorical._repr_categories_info()
        if footer:
            footer += "\n"
        footer += level_info
        return str(footer)

    def _get_formatted_values(self):
        """Format the underlying values with the configured NA representation."""
        values = self.categorical._internal_get_values()
        return format_array(values, None, float_format=None, na_rep=self.na_rep)

    def to_string(self):
        """Return the full string representation of the categorical."""
        if len(self.categorical) == 0:
            return self._get_footer() if self.footer else ""
        stripped = ["{i}".format(i=i).strip() for i in self._get_formatted_values()]
        lines = ["[" + ", ".join(stripped) + "]"]
        if self.footer:
            footer = self._get_footer()
            if footer:
                lines.append(footer)
        return str("\n".join(lines))
class SeriesFormatter:
    """Render a Series to a console-friendly string, handling truncation
    (max_rows/min_rows), an optional header line, and a footer with
    name/length/dtype information."""
    def __init__(
        self,
        series,
        buf=None,
        length=True,
        header=True,
        index=True,
        na_rep="NaN",
        name=False,
        float_format=None,
        dtype=True,
        max_rows=None,
        min_rows=None,
    ):
        self.series = series
        self.buf = buf if buf is not None else StringIO()
        self.name = name
        self.na_rep = na_rep
        self.header = header
        self.length = length
        self.index = index
        self.max_rows = max_rows
        self.min_rows = min_rows
        if float_format is None:
            float_format = get_option("display.float_format")
        self.float_format = float_format
        self.dtype = dtype
        self.adj = _get_adjustment()
        # Compute the (possibly truncated) view up front.
        self._chk_truncate()
    def _chk_truncate(self):
        """Decide whether the series must be truncated for display and, if
        so, build a shortened series of head + tail rows."""
        from pandas.core.reshape.concat import concat
        min_rows = self.min_rows
        max_rows = self.max_rows
        # truncation determined by max_rows, actual truncated number of rows
        # used below by min_rows
        truncate_v = max_rows and (len(self.series) > max_rows)
        series = self.series
        if truncate_v:
            if min_rows:
                # if min_rows is set (not None or 0), set max_rows to minimum
                # of both
                max_rows = min(min_rows, max_rows)
            if max_rows == 1:
                row_num = max_rows
                series = series.iloc[:max_rows]
            else:
                # Show an equal number of rows from the head and the tail.
                row_num = max_rows // 2
                series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
            self.tr_row_num = row_num
        else:
            self.tr_row_num = None
        self.tr_series = series
        self.truncate_v = truncate_v
    def _get_footer(self):
        """Build the footer string: freq, name, length, dtype, and (for
        categoricals) the categories description."""
        name = self.series.name
        footer = ""
        if getattr(self.series.index, "freq", None) is not None:
            footer += "Freq: {freq}".format(freq=self.series.index.freqstr)
        if self.name is not False and name is not None:
            if footer:
                footer += ", "
            series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n"))
            footer += (
                ("Name: {sname}".format(sname=series_name)) if name is not None else ""
            )
        if self.length is True or (self.length == "truncate" and self.truncate_v):
            if footer:
                footer += ", "
            footer += "Length: {length}".format(length=len(self.series))
        if self.dtype is not False and self.dtype is not None:
            name = getattr(self.tr_series.dtype, "name", None)
            if name:
                if footer:
                    footer += ", "
                footer += "dtype: {typ}".format(typ=pprint_thing(name))
        # level infos are added to the end and in a new line, like it is done
        # for Categoricals
        if is_categorical_dtype(self.tr_series.dtype):
            level_info = self.tr_series._values._repr_categories_info()
            if footer:
                footer += "\n"
            footer += level_info
        return str(footer)
    def _get_formatted_index(self):
        """Return (formatted_index_strings, have_header) for the truncated
        series' index."""
        index = self.tr_series.index
        is_multi = isinstance(index, ABCMultiIndex)
        if is_multi:
            have_header = any(name for name in index.names)
            fmt_index = index.format(names=True)
        else:
            have_header = index.name is not None
            fmt_index = index.format(name=True)
        return fmt_index, have_header
    def _get_formatted_values(self):
        """Format the truncated series' values as strings."""
        values_to_format = self.tr_series._formatting_values()
        return format_array(
            values_to_format, None, float_format=self.float_format, na_rep=self.na_rep
        )
    def to_string(self):
        """Render the series to a single string, inserting '...' rows when
        truncated and appending the footer."""
        series = self.tr_series
        footer = self._get_footer()
        if len(series) == 0:
            return "{name}([], {footer})".format(
                name=self.series.__class__.__name__, footer=footer
            )
        fmt_index, have_header = self._get_formatted_index()
        fmt_values = self._get_formatted_values()
        if self.truncate_v:
            n_header_rows = 0
            row_num = self.tr_row_num
            width = self.adj.len(fmt_values[row_num - 1])
            if width > 3:
                dot_str = "..."
            else:
                dot_str = ".."
            # Series uses mode=center because it has single value columns
            # DataFrame uses mode=left
            dot_str = self.adj.justify([dot_str], width, mode="center")[0]
            fmt_values.insert(row_num + n_header_rows, dot_str)
            fmt_index.insert(row_num + 1, "")
        if self.index:
            result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
        else:
            result = self.adj.adjoin(3, fmt_values)
        if self.header and have_header:
            result = fmt_index[0] + "\n" + result
        if footer:
            result += "\n" + footer
        return str("".join(result))
class TextAdjustment:
    """Baseline text measurement/justification strategy where the display
    width of a string equals its character count."""

    def __init__(self):
        self.encoding = get_option("display.encoding")

    def len(self, text):
        """Display width of *text* (plain character count here)."""
        return len(text)

    def justify(self, texts, max_len, mode="right"):
        """Justify each string in *texts* to *max_len* columns."""
        return justify(texts, max_len, mode=mode)

    def adjoin(self, space, *lists, **kwargs):
        """Glue column lists side by side using this object's width and
        justification rules."""
        return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
    """Text adjustment that counts East Asian wide/fullwidth characters as
    two display columns."""

    def __init__(self):
        super().__init__()
        # Ambiguous-width ('A') characters render as wide in some East Asian
        # contexts; a display option decides which width to assume.
        self.ambiguous_width = (
            2 if get_option("display.unicode.ambiguous_as_wide") else 1
        )
        # Definition of East Asian Width
        # http://unicode.org/reports/tr11/
        # Ambiguous width can be changed by option
        self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}

    def len(self, text):
        """
        Calculate display width considering unicode East Asian Width
        """
        if not isinstance(text, str):
            return len(text)
        width_of = self._EAW_MAP.get
        return sum(
            width_of(east_asian_width(ch), self.ambiguous_width) for ch in text
        )

    def justify(self, texts, max_len, mode="right"):
        # re-calculate padding space per str considering East Asian Width
        def padded_to(t):
            return max_len - self.len(t) + len(t)

        if mode == "left":
            return [t.ljust(padded_to(t)) for t in texts]
        if mode == "center":
            return [t.center(padded_to(t)) for t in texts]
        return [t.rjust(padded_to(t)) for t in texts]
def _get_adjustment():
    """Return the text-adjustment strategy matching the display options."""
    if get_option("display.unicode.east_asian_width"):
        return EastAsianTextAdjustment()
    return TextAdjustment()
class TableFormatter:
    """Shared behavior for tabular formatters (dimension footer, per-column
    formatter lookup)."""

    # Subclasses set this to True, False, or "truncate".
    show_dimensions = None

    @property
    def should_show_dimensions(self):
        if self.show_dimensions is True:
            return True
        # With "truncate", dimensions are shown only when data was cut.
        return self.show_dimensions == "truncate" and self.is_truncated

    def _get_formatter(self, i):
        """Return the formatter registered for column *i*, or None."""
        formatters = self.formatters
        if isinstance(formatters, (list, tuple)):
            return formatters[i] if is_integer(i) else None
        # Dict-style lookup: positional indices are mapped to labels first.
        if is_integer(i) and i not in self.columns:
            i = self.columns[i]
        return formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
    def __init__(
        self,
        frame,
        buf=None,
        columns=None,
        col_space=None,
        header=True,
        index=True,
        na_rep="NaN",
        formatters=None,
        justify=None,
        float_format=None,
        sparsify=None,
        index_names=True,
        line_width=None,
        max_rows=None,
        min_rows=None,
        max_cols=None,
        show_dimensions=False,
        decimal=".",
        table_id=None,
        render_links=False,
        **kwds
    ):
        """Store display options, resolve defaults from the global config,
        restrict to the requested columns, and pre-compute the truncated
        frame used for rendering."""
        self.frame = frame
        if buf is not None:
            # A path-like buf is normalized to a usable path string.
            self.buf = _expand_user(_stringify_path(buf))
        else:
            self.buf = StringIO()
        self.show_index_names = index_names
        if sparsify is None:
            sparsify = get_option("display.multi_sparse")
        self.sparsify = sparsify
        self.float_format = float_format
        self.formatters = formatters if formatters is not None else {}
        self.na_rep = na_rep
        self.decimal = decimal
        self.col_space = col_space
        self.header = header
        self.index = index
        self.line_width = line_width
        self.max_rows = max_rows
        self.min_rows = min_rows
        self.max_cols = max_cols
        self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame))
        self.show_dimensions = show_dimensions
        self.table_id = table_id
        self.render_links = render_links
        if justify is None:
            self.justify = get_option("display.colheader_justify")
        else:
            self.justify = justify
        self.kwds = kwds
        if columns is not None:
            # Restrict (and reorder) the frame to the requested columns.
            self.columns = ensure_index(columns)
            self.frame = self.frame[self.columns]
        else:
            self.columns = frame.columns
        self._chk_truncate()
        self.adj = _get_adjustment()
    def _chk_truncate(self):
        """
        Checks whether the frame should be truncated. If so, slices
        the frame up.

        Sets tr_frame, truncate_h/v, is_truncated, and (when truncating)
        tr_col_num / tr_row_num marking where the '...' separators go.
        """
        from pandas.core.reshape.concat import concat
        # Cut the data to the information actually printed
        max_cols = self.max_cols
        max_rows = self.max_rows
        if max_cols == 0 or max_rows == 0:  # assume we are in the terminal
            (w, h) = get_terminal_size()
            self.w = w
            self.h = h
            if self.max_rows == 0:
                dot_row = 1
                prompt_row = 1
                if self.show_dimensions:
                    show_dimension_rows = 3
                # NOTE(review): show_dimension_rows is only bound when
                # show_dimensions is truthy — confirm callers never hit this
                # path with max_rows == 0 and show_dimensions False.
                n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
                # rows available to fill with actual data
                max_rows_adj = self.h - n_add_rows
                self.max_rows_adj = max_rows_adj
            # Format only rows and columns that could potentially fit the
            # screen
            if max_cols == 0 and len(self.frame.columns) > w:
                max_cols = w
            if max_rows == 0 and len(self.frame) > h:
                max_rows = h
        if not hasattr(self, "max_rows_adj"):
            if max_rows:
                if (len(self.frame) > max_rows) and self.min_rows:
                    # if truncated, set max_rows showed to min_rows
                    max_rows = min(self.min_rows, max_rows)
            self.max_rows_adj = max_rows
        if not hasattr(self, "max_cols_adj"):
            self.max_cols_adj = max_cols
        max_cols_adj = self.max_cols_adj
        max_rows_adj = self.max_rows_adj
        truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
        truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
        frame = self.frame
        if truncate_h:
            if max_cols_adj == 0:
                col_num = len(frame.columns)
            elif max_cols_adj == 1:
                frame = frame.iloc[:, :max_cols]
                col_num = max_cols
            else:
                # Keep an equal number of columns from the left and right.
                col_num = max_cols_adj // 2
                frame = concat(
                    (frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1
                )
            self.tr_col_num = col_num
        if truncate_v:
            if max_rows_adj == 1:
                row_num = max_rows
                frame = frame.iloc[:max_rows, :]
            else:
                # Keep an equal number of rows from the head and tail.
                row_num = max_rows_adj // 2
                frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
            self.tr_row_num = row_num
        else:
            self.tr_row_num = None
        self.tr_frame = frame
        self.truncate_h = truncate_h
        self.truncate_v = truncate_v
        self.is_truncated = self.truncate_h or self.truncate_v
    def _to_str_columns(self):
        """
        Render a DataFrame to a list of columns (as lists of strings).

        Each element of the returned list is one display column (header
        cells followed by value cells); '...' markers are inserted for
        truncated rows/columns.
        """
        frame = self.tr_frame
        # may include levels names also
        str_index = self._get_formatted_index(frame)
        if not is_list_like(self.header) and not self.header:
            # No header requested: just the formatted value columns.
            stringified = []
            for i, c in enumerate(frame):
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(
                    fmt_values,
                    self.justify,
                    minimum=(self.col_space or 0),
                    adj=self.adj,
                )
                stringified.append(fmt_values)
        else:
            if is_list_like(self.header):
                # header is a list of column aliases; must match column count.
                if len(self.header) != len(self.columns):
                    raise ValueError(
                        (
                            "Writing {ncols} cols but got {nalias} "
                            "aliases".format(
                                ncols=len(self.columns), nalias=len(self.header)
                            )
                        )
                    )
                str_columns = [[label] for label in self.header]
            else:
                str_columns = self._get_formatted_column_labels(frame)
            if self.show_row_idx_names:
                # Extra blank header cell aligns columns with the index-name row.
                for x in str_columns:
                    x.append("")
            stringified = []
            for i, c in enumerate(frame):
                cheader = str_columns[i]
                header_colwidth = max(
                    self.col_space or 0, *(self.adj.len(x) for x in cheader)
                )
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(
                    fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
                )
                max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
                cheader = self.adj.justify(cheader, max_len, mode=self.justify)
                stringified.append(cheader + fmt_values)
        strcols = stringified
        if self.index:
            strcols.insert(0, str_index)
        # Add ... to signal truncated
        truncate_h = self.truncate_h
        truncate_v = self.truncate_v
        if truncate_h:
            col_num = self.tr_col_num
            strcols.insert(self.tr_col_num + 1, [" ..."] * (len(str_index)))
        if truncate_v:
            n_header_rows = len(str_index) - len(frame)
            row_num = self.tr_row_num
            for ix, col in enumerate(strcols):
                # infer from above row
                cwidth = self.adj.len(strcols[ix][row_num])
                is_dot_col = False
                if truncate_h:
                    is_dot_col = ix == col_num + 1
                if cwidth > 3 or is_dot_col:
                    my_str = "..."
                else:
                    my_str = ".."
                if ix == 0:
                    # Index column: left-align the dots.
                    dot_mode = "left"
                elif is_dot_col:
                    cwidth = 4
                    dot_mode = "right"
                else:
                    dot_mode = "right"
                dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
                strcols[ix].insert(row_num + n_header_rows, dot_str)
        return strcols
    def to_string(self):
        """
        Render a DataFrame to a console-friendly tabular output.

        Writes the result to self.buf; appends a dimensions footer when
        should_show_dimensions is set.
        """
        from pandas import Series
        frame = self.frame
        if len(frame.columns) == 0 or len(frame.index) == 0:
            info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format(
                name=type(self.frame).__name__,
                col=pprint_thing(frame.columns),
                idx=pprint_thing(frame.index),
            )
            text = info_line
        else:
            strcols = self._to_str_columns()
            if self.line_width is None:  # no need to wrap around just print
                # the whole frame
                text = self.adj.adjoin(1, *strcols)
            elif (
                not isinstance(self.max_cols, int) or self.max_cols > 0
            ):  # need to wrap around
                text = self._join_multiline(*strcols)
            else:  # max_cols == 0. Try to fit frame to terminal
                text = self.adj.adjoin(1, *strcols).split("\n")
                max_len = Series(text).str.len().max()
                # plus truncate dot col
                dif = max_len - self.w
                # '+ 1' to avoid too wide repr (GH PR #17023)
                adj_dif = dif + 1
                col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
                n_cols = len(col_lens)
                counter = 0
                # Drop middle columns until the frame fits the terminal width.
                while adj_dif > 0 and n_cols > 1:
                    counter += 1
                    mid = int(round(n_cols / 2.0))
                    mid_ix = col_lens.index[mid]
                    col_len = col_lens[mid_ix]
                    # adjoin adds one
                    adj_dif -= col_len + 1
                    col_lens = col_lens.drop(mid_ix)
                    n_cols = len(col_lens)
                # subtract index column
                max_cols_adj = n_cols - self.index
                # GH-21180. Ensure that we print at least two.
                max_cols_adj = max(max_cols_adj, 2)
                self.max_cols_adj = max_cols_adj
                # Call again _chk_truncate to cut frame appropriately
                # and then generate string representation
                self._chk_truncate()
                strcols = self._to_str_columns()
                text = self.adj.adjoin(1, *strcols)
        self.buf.writelines(text)
        if self.should_show_dimensions:
            self.buf.write(
                "\n\n[{nrows} rows x {ncols} columns]".format(
                    nrows=len(frame), ncols=len(frame.columns)
                )
            )
    def _join_multiline(self, *strcols):
        """Wrap wide output: split the string columns into groups that each
        fit within self.line_width, rendering one group per paragraph with
        the index repeated and a trailing '\\' continuation marker."""
        lwidth = self.line_width
        adjoin_width = 1
        strcols = list(strcols)
        if self.index:
            # The index column is repeated in every group; reserve its width.
            idx = strcols.pop(0)
            lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
        col_widths = [
            np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
            for col in strcols
        ]
        # _binify partitions the columns into width-limited groups.
        col_bins = _binify(col_widths, lwidth)
        nbins = len(col_bins)
        if self.truncate_v:
            nrows = self.max_rows_adj + 1
        else:
            nrows = len(self.frame)
        str_lst = []
        st = 0
        for i, ed in enumerate(col_bins):
            row = strcols[st:ed]
            if self.index:
                row.insert(0, idx)
            if nbins > 1:
                # All groups except the last get a '\' continuation column.
                if ed <= len(strcols) and i < nbins - 1:
                    row.append([" \\"] + [" "] * (nrows - 1))
                else:
                    row.append([" "] * nrows)
            str_lst.append(self.adj.adjoin(adjoin_width, *row))
            st = ed
        return "\n\n".join(str_lst)
def to_latex(
self,
column_format=None,
longtable=False,
encoding=None,
multicolumn=False,
multicolumn_format=None,
multirow=False,
):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
latex_renderer = LatexFormatter(
self,
column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
)
if encoding is None:
encoding = "utf-8"
if hasattr(self.buf, "write"):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, str):
import codecs
with codecs.open(self.buf, "w", encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError("buf is not a file name and it has no write " "method")
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(
values_to_format,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
space=self.col_space,
decimal=self.decimal,
)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html = Klass(self, classes=classes, border=border).render()
if hasattr(self.buf, "write"):
buffer_put_lines(self.buf, html)
elif isinstance(self.buf, str):
with open(self.buf, "w") as f:
buffer_put_lines(f, html)
else:
raise TypeError("buf is not a file name and it has no write " " method")
    def _get_formatted_column_labels(self, frame):
        """Format the column labels for display, returning one list of header
        cells per column.  Numeric columns get a leading space so headers
        align with right-justified values."""
        from pandas.core.index import _sparsify
        columns = frame.columns
        if isinstance(columns, ABCMultiIndex):
            fmt_columns = columns.format(sparsify=False, adjoin=False)
            fmt_columns = list(zip(*fmt_columns))
            dtypes = self.frame.dtypes._values
            # if we have a Float level, they don't use leading space at all
            restrict_formatting = any(l.is_floating for l in columns.levels)
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            def space_format(x, y):
                # Prepend a space to each level label of a numeric column
                # unless the user supplied a formatter for it.
                if (
                    y not in self.formatters
                    and need_leadsp[x]
                    and not restrict_formatting
                ):
                    return " " + y
                return y
            str_columns = list(
                zip(*[[space_format(x, y) for y in x] for x in fmt_columns])
            )
            if self.sparsify and len(str_columns):
                str_columns = _sparsify(str_columns)
            str_columns = [list(x) for x in zip(*str_columns)]
        else:
            fmt_columns = columns.format()
            dtypes = self.frame.dtypes
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            str_columns = [
                [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
                for i, (col, x) in enumerate(zip(columns, fmt_columns))
            ]
        # self.str_columns = str_columns
        return str_columns
@property
def has_index_names(self):
    """True when the frame's row index carries any level name."""
    index = self.frame.index
    return _has_names(index)
@property
def has_column_names(self):
    """True when the frame's column index carries any level name."""
    columns = self.frame.columns
    return _has_names(columns)
@property
def show_row_idx_names(self):
    """Whether the row-index name header line should be rendered."""
    flags = [self.has_index_names, self.index, self.show_index_names]
    return all(flags)
@property
def show_col_idx_names(self):
    """Whether the column-index name header should be rendered."""
    flags = [self.has_column_names, self.show_index_names, self.header]
    return all(flags)
def _get_formatted_index(self, frame):
    """Return the formatted row-index column, one string per output row."""
    # Note: this is only used by to_string() and to_latex(), not by
    # to_html().
    index = frame.index
    columns = frame.columns
    fmt = self._get_formatter("__index__")

    if isinstance(index, ABCMultiIndex):
        fmt_index = index.format(
            sparsify=self.sparsify,
            adjoin=False,
            names=self.show_row_idx_names,
            formatter=fmt,
        )
    else:
        fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)]

    # pad every index level to a fixed width so the levels line up
    fmt_index = [
        tuple(
            _make_fixed_width(
                list(x), justify="left", minimum=(self.col_space or 0), adj=self.adj
            )
        )
        for x in fmt_index
    ]

    adjoined = self.adj.adjoin(1, *fmt_index).split("\n")

    # empty space for columns
    if self.show_col_idx_names:
        col_header = ["{x}".format(x=x) for x in self._get_column_name_list()]
    else:
        col_header = [""] * columns.nlevels

    if self.header:
        return col_header + adjoined
    else:
        return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, ABCMultiIndex):
names.extend("" if name is None else name for name in columns.names)
else:
names.append("" if columns.name is None else columns.name)
return names
# ----------------------------------------------------------------------
# Array formatters
def format_array(
    values,
    formatter,
    float_format=None,
    na_rep="NaN",
    digits=None,
    space=None,
    justify="right",
    decimal=".",
    leading_space=None,
):
    """
    Format an array for printing.

    Parameters
    ----------
    values
    formatter
    float_format
    na_rep
    digits
    space
    justify
    decimal
    leading_space : bool, optional
        Whether the array should be formatted with a leading space.
        When an array as a column of a Series or DataFrame, we do want
        the leading space to pad between columns.

        When formatting an Index subclass
        (e.g. IntervalIndex._format_native_types), we don't want the
        leading space since it should be left-aligned.

    Returns
    -------
    List[str]
    """
    # Pick the formatter class from the dtype. Note the tz-aware check
    # inspects the array itself rather than just the dtype.
    if is_datetime64_dtype(values.dtype):
        klass = Datetime64Formatter
    elif is_datetime64tz_dtype(values):
        klass = Datetime64TZFormatter
    elif is_timedelta64_dtype(values.dtype):
        klass = Timedelta64Formatter
    elif is_extension_array_dtype(values.dtype):
        klass = ExtensionArrayFormatter
    elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
        klass = FloatArrayFormatter
    elif is_integer_dtype(values.dtype):
        klass = IntArrayFormatter
    else:
        klass = GenericArrayFormatter

    # fall back to display options for anything left unspecified
    space = get_option("display.column_space") if space is None else space
    if float_format is None:
        float_format = get_option("display.float_format")
    if digits is None:
        digits = get_option("display.precision")

    return klass(
        values,
        digits=digits,
        na_rep=na_rep,
        float_format=float_format,
        formatter=formatter,
        space=space,
        justify=justify,
        decimal=decimal,
        leading_space=leading_space,
    ).get_result()
class GenericArrayFormatter:
    """Default array formatter: renders arbitrary objects to strings.

    Subclasses override ``_format_strings`` for dtype-specific rendering.
    """

    def __init__(
        self,
        values,
        digits=7,
        formatter=None,
        na_rep="NaN",
        space=12,
        float_format=None,
        justify="right",
        decimal=".",
        quoting=None,
        fixed_width=True,
        leading_space=None,
    ):
        self.values = values                # array-like to format
        self.digits = digits                # precision for numeric rendering
        self.na_rep = na_rep                # string used for missing values
        self.space = space                  # minimum column-width hint
        self.formatter = formatter          # optional user-supplied callable
        self.float_format = float_format    # optional float format override
        self.justify = justify              # justification mode for padding
        self.decimal = decimal              # decimal separator character
        self.quoting = quoting              # csv-style quoting constant
        self.fixed_width = fixed_width      # pad results to a common width
        self.leading_space = leading_space  # tri-state: None / True / False

    def get_result(self):
        """Format the values and pad them to a common width."""
        fmt_values = self._format_strings()
        return _make_fixed_width(fmt_values, self.justify)

    def _format_strings(self):
        """Return the list of formatted strings for ``self.values``."""
        if self.float_format is None:
            float_format = get_option("display.float_format")
            if float_format is None:
                # default float rendering: general format at display precision
                fmt_str = "{{x: .{prec:d}g}}".format(
                    prec=get_option("display.precision")
                )
                float_format = lambda x: fmt_str.format(x=x)
        else:
            float_format = self.float_format

        # non-float values go through the user formatter or pprint_thing
        formatter = (
            self.formatter
            if self.formatter is not None
            else (lambda x: pprint_thing(x, escape_chars=("\t", "\r", "\n")))
        )

        def _format(x):
            if self.na_rep is not None and is_scalar(x) and isna(x):
                try:
                    # try block for np.isnat specifically
                    # determine na_rep if x is None or NaT-like
                    if x is None:
                        return "None"
                    elif x is NaT or np.isnat(x):
                        return "NaT"
                except (TypeError, ValueError):
                    # np.isnat only handles datetime or timedelta objects
                    pass
                return self.na_rep
            elif isinstance(x, PandasObject):
                return "{x}".format(x=x)
            else:
                # object dtype
                return "{x}".format(x=formatter(x))

        vals = self.values
        if isinstance(vals, Index):
            vals = vals._values
        elif isinstance(vals, ABCSparseArray):
            vals = vals.values

        # boolean mask of positions that hold actual (non-NA) floats
        is_float_type = lib.map_infer(vals, is_float) & notna(vals)
        leading_space = self.leading_space
        if leading_space is None:
            # by default, pad non-floats only when floats are present
            leading_space = is_float_type.any()

        fmt_values = []
        for i, v in enumerate(vals):
            if not is_float_type[i] and leading_space:
                fmt_values.append(" {v}".format(v=_format(v)))
            elif is_float_type[i]:
                fmt_values.append(float_format(v))
            else:
                if leading_space is False:
                    # False specifically, so that the default is
                    # to include a space if we get here.
                    tpl = "{v}"
                else:
                    tpl = " {v}"
                fmt_values.append(tpl.format(v=_format(v)))

        return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
    """Formatter for float and complex arrays.

    Chooses between fixed-point and scientific notation, honours
    ``float_format``/``formatter`` overrides, and trims trailing zeros
    when the output is fixed-width.
    """

    def __init__(self, *args, **kwargs):
        GenericArrayFormatter.__init__(self, *args, **kwargs)

        # float_format is expected to be a string
        # formatter should be used to pass a function
        if self.float_format is not None and self.formatter is None:
            # GH21625, GH22270
            self.fixed_width = False
            if callable(self.float_format):
                self.formatter = self.float_format
                self.float_format = None

    def _value_formatter(self, float_format=None, threshold=None):
        """Returns a function to be applied on each value to format it
        """
        # the float_format parameter supersedes self.float_format
        if float_format is None:
            float_format = self.float_format

        # we are going to compose different functions, to first convert to
        # a string, then replace the decimal symbol, and finally chop according
        # to the threshold

        # when there is no float_format, we use str instead of '%g'
        # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
        if float_format:

            def base_formatter(v):
                return float_format(value=v) if notna(v) else self.na_rep

        else:

            def base_formatter(v):
                return str(v) if notna(v) else self.na_rep

        if self.decimal != ".":
            # swap only the first '.' for the requested decimal symbol

            def decimal_formatter(v):
                return base_formatter(v).replace(".", self.decimal, 1)

        else:
            decimal_formatter = base_formatter

        if threshold is None:
            return decimal_formatter

        def formatter(value):
            if notna(value):
                # values below the chop threshold display as exactly zero
                if abs(value) > threshold:
                    return decimal_formatter(value)
                else:
                    return decimal_formatter(0.0)
            else:
                return self.na_rep

        return formatter

    def get_result_as_array(self):
        """
        Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
        """

        if self.formatter is not None:
            return np.array([self.formatter(x) for x in self.values])

        if self.fixed_width:
            threshold = get_option("display.chop_threshold")
        else:
            threshold = None

        # if we have a fixed_width, we'll need to try different float_format
        def format_values_with(float_format):
            formatter = self._value_formatter(float_format, threshold)

            # default formatter leaves a space to the left when formatting
            # floats, must be consistent for left-justifying NaNs (GH #25061)
            if self.justify == "left":
                na_rep = " " + self.na_rep
            else:
                na_rep = self.na_rep

            # separate the wheat from the chaff
            values = self.values
            is_complex = is_complex_dtype(values)
            mask = isna(values)
            if hasattr(values, "to_dense"):  # sparse numpy ndarray
                values = values.to_dense()
            values = np.array(values, dtype="object")
            values[mask] = na_rep
            imask = (~mask).ravel()
            values.flat[imask] = np.array(
                [formatter(val) for val in values.ravel()[imask]]
            )

            if self.fixed_width:
                if is_complex:
                    return _trim_zeros_complex(values, na_rep)
                else:
                    return _trim_zeros_float(values, na_rep)

            return values

        # There is a special default string when we are fixed-width
        # The default is otherwise to use str instead of a formatting string
        if self.float_format is None:
            if self.fixed_width:
                float_format = partial(
                    "{value: .{digits:d}f}".format, digits=self.digits
                )
            else:
                float_format = self.float_format
        else:
            float_format = lambda value: self.float_format % value
        formatted_values = format_values_with(float_format)

        if not self.fixed_width:
            return formatted_values

        # we need do convert to engineering format if some values are too small
        # and would appear as 0, or if some values are too big and take too
        # much space

        if len(formatted_values) > 0:
            maxlen = max(len(x) for x in formatted_values)
            too_long = maxlen > self.digits + 6
        else:
            too_long = False

        with np.errstate(invalid="ignore"):
            abs_vals = np.abs(self.values)
            # this is pretty arbitrary for now
            # large values: more that 8 characters including decimal symbol
            # and first digit, hence > 1e6
            has_large_values = (abs_vals > 1e6).any()
            has_small_values = (
                (abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
            ).any()

        if has_small_values or (too_long and has_large_values):
            # fall back to scientific notation for the whole column
            float_format = partial("{value: .{digits:d}e}".format, digits=self.digits)
            formatted_values = format_values_with(float_format)

        return formatted_values

    def _format_strings(self):
        # shortcut
        if self.formatter is not None:
            return [self.formatter(x) for x in self.values]

        return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
    """Formatter for integer arrays: right-aligned with a sign slot."""

    def _format_strings(self):
        def _default(x):
            # the leading space reserves room for a minus sign
            return "{x: d}".format(x=x)

        fmt = self.formatter or _default
        return [fmt(v) for v in self.values]
class Datetime64Formatter(GenericArrayFormatter):
    """Formatter for tz-naive datetime64 values."""

    def __init__(self, values, nat_rep="NaT", date_format=None, **kwargs):
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep          # string used for NaT entries
        self.date_format = date_format  # optional strftime-style format

    def _format_strings(self):
        # By definition these values do not carry a timezone.
        dti = self.values
        if not isinstance(dti, DatetimeIndex):
            dti = DatetimeIndex(dti)

        user_formatter = self.formatter
        if user_formatter is not None and callable(user_formatter):
            return [user_formatter(x) for x in dti]

        date_fmt = _get_format_datetime64_from_values(dti, self.date_format)
        formatted = format_array_from_datetime(
            dti.asi8.ravel(), format=date_fmt, na_rep=self.nat_rep
        ).reshape(dti.shape)
        return formatted.tolist()
class ExtensionArrayFormatter(GenericArrayFormatter):
    """Formatter that delegates to the extension array's own formatter."""

    def _format_strings(self):
        vals = self.values
        if isinstance(vals, (ABCIndexClass, ABCSeries)):
            vals = vals._values

        ea_formatter = vals._formatter(boxed=True)

        if is_categorical_dtype(vals.dtype):
            # Categorical is special for now, so that we can preserve tzinfo
            arr = vals._internal_get_values()
        else:
            arr = np.asarray(vals)

        return format_array(
            arr,
            ea_formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            leading_space=self.leading_space,
        )
def format_percentiles(percentiles):
    """
    Outputs rounded and formatted percentiles.

    Parameters
    ----------
    percentiles : list-like, containing floats from interval [0,1]

    Returns
    -------
    formatted : list of strings

    Notes
    -----
    Rounding precision is chosen so that: (1) if any two elements of
    ``percentiles`` differ, they remain different after rounding
    (2) no entry is *rounded* to 0% or 100%.
    Any non-integer is always rounded to at least 1 decimal place.

    Examples
    --------
    Keeps all entries different after rounding:

    >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    ['1.999%', '2.001%', '50%', '66.667%', '99.99%']

    No element is rounded to 0% or 100% (unless already equal to it).
    Duplicates are allowed:

    >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    """
    pcts = np.asarray(percentiles)

    # errstate also silences comparisons involving np.NaN
    with np.errstate(invalid="ignore"):
        valid = (
            is_numeric_dtype(pcts) and np.all(pcts >= 0) and np.all(pcts <= 1)
        )
    if not valid:
        raise ValueError("percentiles should all be in the interval [0,1]")

    pcts = 100 * pcts
    integral = np.isclose(pcts.astype(int), pcts)

    if np.all(integral):
        return [p + "%" for p in pcts.astype(int).astype(str)]

    uniq = np.unique(pcts)
    lead = uniq[0] if uniq[0] > 0 else None
    trail = 100 - uniq[-1] if uniq[-1] < 100 else None

    # Least precision that keeps the rounded percentiles distinct: derived
    # from the smallest gap, including the distance to 0% and 100%.
    gaps = np.ediff1d(uniq, to_begin=lead, to_end=trail)
    prec = -np.floor(np.log10(np.min(gaps))).astype(int)
    prec = max(1, prec)

    out = np.empty_like(pcts, dtype=object)
    out[integral] = pcts[integral].astype(int).astype(str)
    out[~integral] = pcts[~integral].round(prec).astype(str)
    return [p + "%" for p in out]
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
assert values.ndim == 1
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep="NaT"):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
if getattr(x, "tzinfo", None) is not None:
x = Timestamp(x).tz_convert(tz)
else:
x = Timestamp(x).tz_localize(tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep="NaT", date_format=None):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep="NaT", date_format=None):
    """Return a ``(value, tz)`` -> str formatter for datetime64 values."""
    if is_dates_only:

        def fmt(x, tz=None):
            return _format_datetime64_dateonly(
                x, nat_rep=nat_rep, date_format=date_format
            )

    else:

        def fmt(x, tz=None):
            return _format_datetime64(x, tz=tz, nat_rep=nat_rep)

    return fmt
def _get_format_datetime64_from_values(values, date_format):
    """ given values and a date_format, return a string format """
    if isinstance(values, np.ndarray) and values.ndim > 1:
        # DatetimeIndex only accepts 1D input; element order is irrelevant here
        values = values.ravel()

    if _is_dates_only(values):
        return date_format or "%Y-%m-%d"
    return date_format
class Datetime64TZFormatter(Datetime64Formatter):
    """Formatter for tz-aware datetime values."""

    def _format_strings(self):
        # Values carry a timezone by definition; format each box separately.
        boxed = self.values.astype(object)
        dates_only = _is_dates_only(boxed)
        fmt = self.formatter or _get_format_datetime64(
            dates_only, date_format=self.date_format
        )
        return [fmt(v) for v in boxed]
class Timedelta64Formatter(GenericArrayFormatter):
    """Formatter for timedelta64 values."""

    def __init__(self, values, nat_rep="NaT", box=False, **kwargs):
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep  # string used for NaT entries
        self.box = box          # wrap each rendered value in quotes

    def _format_strings(self):
        fmt = self.formatter or _get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=self.box
        )
        return np.array([fmt(v) for v in self.values])
def _get_format_timedelta64(values, nat_rep="NaT", box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
all_sub_day = (
np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
)
if even_days:
format = None
elif all_sub_day:
format = "sub_day"
else:
format = "long"
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
def _make_fixed_width(strings, justify="right", minimum=None, adj=None):
if len(strings) == 0 or justify == "all":
return strings
if adj is None:
adj = _get_adjustment()
max_len = max(adj.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[: max_len - 3] + "..."
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros_complex(str_complexes, na_rep="NaN"):
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    trimmed = []
    for value in str_complexes:
        # split on the sign/j markers so each numeric chunk is trimmed alone
        parts = re.split(r"([j+-])", value)
        trimmed.append("".join(_trim_zeros_float(parts, na_rep)))
    return trimmed
def _trim_zeros_float(str_floats, na_rep="NaN"):
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
def _is_number(x):
return x != na_rep and not x.endswith("inf")
def _cond(values):
finite = [x for x in values if _is_number(x)]
return (
len(finite) > 0
and all(x.endswith("0") for x in finite)
and not (any(("e" in x) or ("E" in x) for x in finite))
)
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith(".") and _is_number(x) else x for x in trimmed]
def _has_names(index):
if isinstance(index, ABCMultiIndex):
return com._any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter:
    """
    Formats float values according to engineering format.

    Based on matplotlib.ticker.EngFormatter
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "u",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y",
    }

    def __init__(self, accuracy=None, use_eng_prefix=False):
        self.accuracy = accuracy              # decimal digits after the point
        self.use_eng_prefix = use_eng_prefix  # SI letter vs E+NN exponent

    def __call__(self, num):
        """ Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:

        >>> format_eng(0)       # for self.accuracy = 0
        ' 0'

        >>> format_eng(1000000) # for self.accuracy = 1,
                                #     self.use_eng_prefix = True
        ' 1.0M'

        >>> format_eng("-1e-6") # for self.accuracy = 2
                                #     self.use_eng_prefix = False
        '-1.00E-06'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """
        import decimal
        import math

        value = decimal.Decimal(str(num))

        if decimal.Decimal.is_nan(value):
            return "NaN"
        if decimal.Decimal.is_infinite(value):
            return "inf"

        sign = 1
        if value < 0:  # pragma: no cover
            sign = -1
            value = -value

        if value == 0:
            exp3 = decimal.Decimal(0)
        else:
            # nearest lower multiple of 3 of the base-10 exponent
            exp3 = decimal.Decimal(int(math.floor(value.log10() / 3) * 3))

        # clamp into the range covered by the SI prefixes
        exp3 = exp3.min(max(self.ENG_PREFIXES.keys()))
        exp3 = exp3.max(min(self.ENG_PREFIXES.keys()))

        int_exp3 = int(exp3)
        if self.use_eng_prefix:
            suffix = self.ENG_PREFIXES[int_exp3]
        elif int_exp3 < 0:
            suffix = "E-{pow10:02d}".format(pow10=-int_exp3)
        else:
            suffix = "E+{pow10:02d}".format(pow10=int_exp3)

        mantissa = sign * value / (10 ** exp3)

        if self.accuracy is None:  # pragma: no cover
            fmt = "{mant: g}{prefix}"
        else:
            fmt = "{{mant: .{acc:d}f}}{{prefix}}".format(acc=self.accuracy)

        return fmt.format(mant=mantissa, prefix=suffix)
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
    """
    Alter default behavior on how float is formatted in DataFrame.
    Format float in engineering format. By accuracy, we mean the number of
    decimal digits after the floating point.

    See also EngFormatter.
    """
    formatter = EngFormatter(accuracy, use_eng_prefix)
    set_option("display.float_format", formatter)
    # widen the column so the mantissa, decimals, and suffix always fit
    set_option("display.column_space", max(12, accuracy + 9))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
def get_level_lengths(levels, sentinel=""):
    """For each index in each level the function returns lengths of indexes.

    Parameters
    ----------
    levels : list of lists
        List of values on for level.
    sentinel : string, optional
        Value which states that no new index starts on there.

    Returns
    -------
    Returns list of maps. For each level returns map of indexes (key is index
    in row and value is length of index).
    """
    if len(levels) == 0:
        return []

    # positions still eligible to be treated as a sentinel continuation
    control = [True] * len(levels[0])

    result = []
    for level in levels:
        lengths = {}
        start = 0
        for pos, key in enumerate(level):
            if control[pos] and key == sentinel:
                continue
            # a new index starts here: close the previous run
            control[pos] = False
            lengths[start] = pos - start
            start = pos
        lengths[start] = len(level) - start
        result.append(lengths)

    return result
def buffer_put_lines(buf, lines):
    """
    Appends lines to a buffer.

    Parameters
    ----------
    buf
        The buffer to write to
    lines
        The lines to append.
    """
    # if any item is already a str, coerce everything so join() succeeds
    if any(isinstance(line, str) for line in lines):
        lines = [str(line) for line in lines]
    buf.write("\n".join(lines))
| apache-2.0 |
jorge2703/scikit-learn | sklearn/tests/test_cross_validation.py | 19 | 44125 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        self.a = a                # arbitrary parameter echoed by score()
        self.allow_nd = allow_nd  # whether fit/predict flatten >2D input

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            # every extra fit_param must have been sliced to match X
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this compares against the module-level `y`
            # fixture, not the `Y` argument — presumably intentional for
            # these tests; confirm.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # P_sparse is a module-level fixture defined below this class
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # mirror fit(): optionally flatten, then echo the first column
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # deterministic score depending only on the constructor parameter `a`
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the cross-validation tests below.
X = np.ones((10, 2))      # dense design matrix: 10 samples, 2 features
X_sparse = coo_matrix(X)  # the same data in sparse COO layout
# sparse per-sample weight vector (one non-zero entry)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))  # sparse extra fit parameter (see MockClassifier)
y = np.arange(10) // 2            # 5 balanced classes, 2 samples each
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a train/test split is disjoint and, optionally, complete."""
    # Use python sets to get more informative assertion failure messages
    train_set, test_set = set(train), set(test)

    # Train and test split should not overlap
    assert_equal(train_set.intersection(test_set), set())

    if n_samples is not None:
        # Check that the union of train an test split cover all the indices
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Assert every sample appears at least once in some test fold of *cv*."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)

    seen_test = set()
    n_splits = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_splits += 1
        seen_test.update(test)

    assert_equal(n_splits, expected_n_iter)
    if n_samples is not None:
        # the accumulated test samples must cover the whole dataset
        assert_equal(seen_test, set(range(n_samples)))
def test_kfold_valueerrors():
    # Not enough samples for the requested number of folds
    assert_raises(ValueError, cval.KFold, 3, 4)

    # A warning is raised if the least populated class has too few members...
    y = [3, 3, -1, -1, 2]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # ...but the folds are still computed even though every class cannot be
    # represented on both sides of each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Number of folds <= 1 is rejected
    for n_folds in (0, 1):
        assert_raises(ValueError, cval.KFold, 2, n_folds)
        assert_raises(ValueError, cval.StratifiedKFold, y, n_folds)

    # Non-integer n is rejected
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # Non-integer n_folds is rejected
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # All indices must be returned in the test folds, including when
    # equal-sized folds are not possible (n not divisible by n_folds)
    for n_samples in (300, 17):
        kf = cval.KFold(n_samples, 3)
        check_cv_coverage(kf, expected_n_iter=3, n_samples=n_samples)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets
    expected = [
        (4, [([2, 3], [0, 1]), ([0, 1], [2, 3])]),
        (5, [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])]),
    ]
    for n, folds in expected:
        for (train, test), (exp_train, exp_test) in zip(cval.KFold(n, 2),
                                                        folds):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    cases = [
        ([1, 1, 0, 0],
         [([1, 3], [0, 2]), ([0, 2], [1, 3])]),
        ([1, 1, 1, 0, 0, 0, 0],
         [([2, 5, 6], [0, 1, 3, 4]), ([0, 1, 3, 4], [2, 5, 6])]),
    ]
    for labels, folds in cases:
        splits = cval.StratifiedKFold(labels, 2)
        for (train, test), (exp_train, exp_test) in zip(splits, folds):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual splits,
    # with shuffling turned off and on
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    expected_ratios = [(4, 0.10), (0, 0.89), (1, 0.01)]

    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            for label, ratio in expected_ratios:
                assert_almost_equal(
                    np.sum(labels[train] == label) / len(train), ratio, 2)
                assert_almost_equal(
                    np.sum(labels[test] == label) / len(test), ratio, 2)
def test_kfold_balance():
    # KFold should return folds of (nearly) equal sizes
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        sizes = [len(test) for _, test in kf]
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # StratifiedKFold should return balanced fold sizes whenever
    # stratification is possible, with shuffling turned off and on
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            sizes = [len(test) for _, test in skf]
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    collected = None
    for train, test in kf:
        # the training part must differ from the unshuffled fold boundaries
        for sorted_array in (np.arange(100), np.arange(101, 200),
                             np.arange(201, 300)):
            assert_true(np.any(sorted_array != ind[train]))

        if collected is None:
            collected = ind[test].copy()
        else:
            collected = np.concatenate((collected, ind[test]))

    collected.sort()
    assert_array_equal(collected, ind)
def test_shuffle_stratifiedkfold():
    """Different random seeds must shuffle StratifiedKFold differently,
    while each seed still covers all samples."""
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    for (_, test0), (_, test1) in zip(kf0, kf1):
        # Seeds 0 and 1 should not produce identical test folds.
        assert_true(set(test0) != set(test1))
    # Shuffling must not break the partition property.
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Non-shuffled KFold must expose the optimistic bias that shuffling
    introduces on the (author-grouped) digits data."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    # Expected window is empirical for this model/data combination.
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    """`test_size` given as a float fraction, a plain int, a numpy int or
    any Python integer type must all produce identical splits."""
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    # six.integer_types covers `int` (and `long` on Python 2).
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            # Train sets agree across all four parameterizations...
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            # ...and so do the test sets.
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    """Constructor validation: StratifiedShuffleSplit must reject splits
    that cannot respect class stratification."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Each split must keep class proportions and partition the samples,
    for a variety of label layouts including a heavily imbalanced one."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            # Every class must appear on both sides of the split.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # Train and test form a disjoint partition of all samples.
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Across many iterations, each index should be drawn into train/test
    with roughly even (binomial) frequency."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: `n_splits` is resolved from the enclosing scope at call
        # time; it is assigned in the loop below before this helper runs.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        # Tally how often each sample index lands in train/test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        # Sanity checks on the last yielded split and the split sizes.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    """A PredefinedSplit built from KFold's fold assignment must reproduce
    exactly the same train/test splits."""
    folds = -1 * np.ones(10)
    kf_train, kf_test = [], []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # Record which fold each sample's test membership belongs to.
        folds[test_ind] = i
    ps_train, ps_test = [], []
    ps = cval.PredefinedSplit(folds)
    for train_ind, test_ind in ps:
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
    """LabelShuffleSplit must keep whole label groups on one side of the
    split and partition all the samples."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          ]
    for y in ys:
        n_iter = 6
        test_size = 1. / 3
        slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
                                     random_state=0)
        # Make sure the repr works
        repr(slo)
        # Test that the length is correct
        assert_equal(len(slo), n_iter)
        y_unique = np.unique(y)
        for train, test in slo:
            # First test: no train label is in the test set and vice versa
            y_train_unique = np.unique(y[train])
            y_test_unique = np.unique(y[test])
            assert_false(np.any(np.in1d(y[train], y_test_unique)))
            assert_false(np.any(np.in1d(y[test], y_train_unique)))
            # Second test: train and test add up to all the data
            assert_equal(y[train].size + y[test].size, y.size)
            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])
            # Fourth test: # unique train and test labels are correct,
            # +- 1 for rounding error
            assert_true(abs(len(y_test_unique) -
                            round(test_size * len(y_unique))) <= 1)
            assert_true(abs(len(y_train_unique) -
                            round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
    """LeaveOneLabelOut / LeavePLabelOut must behave as if the labels were
    captured at construction, even if the array is mutated afterwards."""
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutate the second copy in place before iterating.
    labels_changing[:] = 0
    for cv_pair in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        cv, cv_changing = cv_pair
        for (train, test), (train_chan, test_chan) in zip(cv, cv_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """Smoke tests: dense, sparse, multioutput, list and 3d inputs, plus
    rejection of an unknown scoring string."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # "sklearn" is not a valid scoring string.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must accept dataframe-like X and series-like y
    without converting them, for both mock and (if available) real pandas."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        types.append((Series, DataFrame))
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        clf = CheckingClassifier(
            check_X=lambda x: isinstance(x, InputFeatureType),
            check_y=lambda x: isinstance(x, TargetType))
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must give the same result whether the CV splits are
    expressed as index arrays or as boolean masks."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: previously the index arrays (train, test) were appended
        # here, so the boolean masks built above were dead code and the
        # mask code path was never actually exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """An SVC with a precomputed linear kernel must score identically to a
    linear-kernel SVC; invalid precomputed inputs must raise ValueError."""
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    svm = SVC(kernel="precomputed")
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # A non-square X cannot be a precomputed Gram matrix.
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # Neither can a plain nested list (not array-like or sparse).
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of every flavour (dense arrays, sparse matrices, scalars,
    strings, objects, callbacks) must be forwarded to the estimator."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom score function wrapped with make_scorer must be invoked
    once per fold and its return value reported per fold."""
    clf = MockClassifier()
    _score_func_args = []

    def score_func(y_test, y_predict):
        # Record each invocation so we can count the folds afterwards.
        _score_func_args.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    # Default CV has three folds; each reports the constant 1.0.
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """An object without a fit method must be rejected with TypeError."""
    class BrokenEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    """Invalid or inconsistent arguments to train_test_split must raise."""
    # No arrays at all.
    assert_raises(ValueError, cval.train_test_split)
    # train_size fraction > 1.
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # test_size + train_size fractions exceed 1 (float and np.float32).
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    # test_size of an unsupported type.
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # Absolute sizes exceeding the number of samples.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    # Unknown keyword argument.
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Arrays with mismatched first dimensions.
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """train_test_split: basic splitting, X/y correspondence, list
    handling, nd-array support and the stratify option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    # (row i of X starts with 10*i, so X[:, 0] == y * 10 by construction)
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """train_test_split must preserve dataframe types of the inputs."""
    # NOTE(review): this function lacks the `test_` prefix, so standard
    # test runners never collect it -- confirm whether that is intentional.
    # check cross_val_score doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """train_test_split keeps MockDataFrame type by default and converts
    to ndarray when allow_lists=False."""
    # NOTE(review): this function lacks the `test_` prefix, so standard
    # test runners never collect it -- confirm whether that is intentional.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    """Default score, "accuracy" and weighted F1 must agree on the
    balanced iris problem."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Regression scorers: default, r2, (negated) MSE and explained
    variance on a fixed synthetic problem."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: significant score on real labels, agreement
    between dense/sparse inputs, custom scorers, and an insignificant
    score on random labels."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    # A single constant label group must not change the result.
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])

    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    # With random labels the score should be low and clearly insignificant.
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """All CV generators must yield integer index arrays (not boolean
    masks) that can be used to slice X and y."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this previously re-checked `train`; the `test`
            # indices must be validated as well.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # Smoke-check that the indices actually slice X and y.
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices but relying on the
    generators' default (index) mode."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this previously re-checked `train`; the `test`
            # indices must be validated as well.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            # Smoke-check that the indices actually slice X and y.
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Every invalid test_size/train_size combination must raise
    ValueError at construction time."""
    bad_kwargs = [dict(test_size=2.0),
                  dict(test_size=1.0),
                  dict(test_size=0.1, train_size=0.95),
                  dict(test_size=11),
                  dict(test_size=10),
                  dict(test_size=8, train_size=3),
                  dict(train_size=1j),
                  dict(test_size=None, train_size=None)]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    """With a fixed random_state, two passes over the same ShuffleSplit
    must yield identical train sets."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed Gram matrix on both axes so
    that it equals the kernel recomputed from the sliced raw data."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    # Train split: K[tr][:, tr] should equal X_tr . X_tr^T.
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    # Test split: columns must still be indexed by the train indices.
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    """cross_val_score must accept X containing NaNs when the pipeline
    starts with an imputer."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(pipeline, X, y, cv=5)
def test_train_test_split_allow_nans():
    """train_test_split must pass NaN-containing data through untouched."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    labels = np.repeat([0, 1], X.shape[0] / 2)
    cval.train_test_split(X, labels, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """permutation_test_score must accept X containing NaNs when the
    pipeline starts with an imputer."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(pipeline, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv must pick StratifiedKFold for classification targets and
    plain KFold otherwise (including multilabel/multioutput y)."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    # Multilabel and multioutput targets cannot be stratified -> KFold.
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """Multilabel targets must work with micro/macro/samples-averaged
    precision scorers."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    # Expected per-fold precisions for this fixed 1-NN problem.
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict must match a manual per-fold fit/predict loop,
    support sparse X and clustering estimators, and reject non-partition
    CV generators."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # Sparsify X by zeroing everything below the median.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # Unsupervised estimators (no y) must also work.
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    def bad_cv():
        # Overlapping/incomplete folds: not a partition of the samples.
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])

    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """cross_val_predict smoke tests for dense, sparse, multioutput,
    list and 3d inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must accept dataframe-like X and series-like y
    without converting them, for both mock and (if available) real pandas."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        types.append((Series, DataFrame))
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        clf = CheckingClassifier(
            check_X=lambda x: isinstance(x, InputFeatureType),
            check_y=lambda x: isinstance(x, TargetType))
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """Sparse-matrix fit parameters must be sliced per fold and forwarded
    to the estimator without breaking scoring."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    sparse_weight = coo_matrix(np.eye(X.shape[0]))
    scores = cval.cross_val_score(
        clf, X, y, fit_params={'sparse_sample_weight': sparse_weight})
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition accepts exactly the permutations of range(n)."""
    indices = np.arange(100)
    assert_true(cval._check_is_partition(indices, 100))
    # Removing one element leaves a gap -> not a partition.
    assert_false(cval._check_is_partition(np.delete(indices, 23), 100))
    # Overwriting index 0 duplicates 23 and drops 0 -> not a partition.
    indices[0] = 23
    assert_false(cval._check_is_partition(indices, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    # Sparse targets yield sparse predictions; densify before comparing.
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
import sys
from copy import copy
from operator import getitem, add
from itertools import product
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
from dask.async import get_sync
from dask import delayed
from dask.utils import ignoring, put_lines
import dask.dataframe as dd
from dask.dataframe.core import repartition_divisions, aca, _concat, Scalar
from dask.dataframe.methods import boundary_slice
from dask.dataframe.utils import assert_eq, make_meta, assert_max_deps
# Shared three-partition test frame: partitions cover index ranges
# [0, 5), [5, 9) and [9, 9] (note the repeated index value 9, reflected
# in the duplicated final division below).
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
                              index=[0, 1, 3]),
       ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
                              index=[5, 6, 8]),
       ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
                              index=[9, 9, 9])}
# Empty frame with the correct dtypes, required metadata for dd.DataFrame.
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
# Concrete pandas equivalent used as the expected value in assertions.
full = d.compute()
def test_Dataframe():
    """Basic indexing, boolean filtering, arithmetic and reductions on the
    shared test frame, checked against the concrete pandas result."""
    expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
                         index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
                         name='a')
    assert_eq(d['a'] + 1, expected)
    tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
    assert_eq(d[d['b'] > 2], full[full['b'] > 2])
    assert_eq(d[['a', 'b']], full[['a', 'b']])
    assert_eq(d.a, full.a)
    assert d.b.mean().compute() == full.b.mean()
    assert np.allclose(d.b.var().compute(), full.b.var())
    assert np.allclose(d.b.std().compute(), full.b.std())
    assert d.index._name == d.index._name  # this is deterministic
    assert repr(d)
def test_head_tail():
    """head/tail on frames and series must match pandas; head reads only
    the first partition, tail only the last; task names are stable per n."""
    assert_eq(d.head(2), full.head(2))
    assert_eq(d.head(3), full.head(3))
    # head only needs the first partition.
    assert_eq(d.head(2), dsk[('x', 0)].head(2))
    assert_eq(d['a'].head(2), full['a'].head(2))
    assert_eq(d['a'].head(3), full['a'].head(3))
    assert_eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
    # Graph keys are deterministic for equal n, distinct for different n.
    assert (sorted(d.head(2, compute=False).dask) ==
            sorted(d.head(2, compute=False).dask))
    assert (sorted(d.head(2, compute=False).dask) !=
            sorted(d.head(3, compute=False).dask))
    assert_eq(d.tail(2), full.tail(2))
    assert_eq(d.tail(3), full.tail(3))
    # tail only needs the last partition.
    assert_eq(d.tail(2), dsk[('x', 2)].tail(2))
    assert_eq(d['a'].tail(2), full['a'].tail(2))
    assert_eq(d['a'].tail(3), full['a'].tail(3))
    assert_eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
    assert (sorted(d.tail(2, compute=False).dask) ==
            sorted(d.tail(2, compute=False).dask))
    assert (sorted(d.tail(2, compute=False).dask) !=
            sorted(d.tail(3, compute=False).dask))
def test_head_npartitions():
    """head() may draw from several partitions (npartitions=k) or from all
    of them (npartitions=-1); asking for more than exist is an error."""
    assert_eq(d.head(5, npartitions=2), full.head(5))
    assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
    for n in (5, 7, 2):
        assert_eq(d.head(n, npartitions=-1), full.head(n))
    with pytest.raises(ValueError):
        d.head(2, npartitions=5)
@pytest.mark.skipif(sys.version_info[:2] == (3, 3),
                    reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_head_npartitions_warn():
    """head() calls in these configurations must not require warnings
    (pytest.warns(None) records without asserting any)."""
    with pytest.warns(None):
        d.head(100)
    with pytest.warns(None):
        d.head(7)
    with pytest.warns(None):
        d.head(7, npartitions=2)
def test_index_head():
    """Index.head(n) mirrors slicing the concrete pandas index."""
    for n in (2, 3):
        assert_eq(d.index.head(n), full.index[:n])
def test_Series():
    """Column access yields dd.Series; arithmetic stays lazy and correct."""
    col = d.a
    assert isinstance(col, dd.Series)
    assert isinstance(col + 1, dd.Series)
    assert_eq(d + 1, full + 1)
def test_Index():
    """The .index accessor must round-trip string and datetime indexes and
    must not itself expose a nested .index attribute."""
    for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
                 pd.DataFrame(np.random.randn(10, 5),
                              index=pd.date_range('2011-01-01', freq='D',
                                                  periods=10))]:
        ddf = dd.from_pandas(case, 3)
        assert_eq(ddf.index, case.index)
        pytest.raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
    """Scalar exposes .dtype only for numpy values; otherwise the repr
    shows the Python type instead."""
    val = np.int64(1)
    s = Scalar({('a', 0): val}, 'a', 'i8')
    assert hasattr(s, 'dtype')
    assert 'dtype' in dir(s)
    assert_eq(s, val)
    assert repr(s) == "dd.Scalar<a, dtype=int64>"
    # Non-numpy values (e.g. a Timestamp) have no dtype attribute.
    val = pd.Timestamp('2001-01-01')
    s = Scalar({('a', 0): val}, 'a', val)
    assert not hasattr(s, 'dtype')
    assert 'dtype' not in dir(s)
    assert_eq(s, val)
    assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_attributes():
assert 'a' in dir(d)
assert 'foo' not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
assert 'a b c' not in dir(df)
df = dd.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}), npartitions=2)
assert 'a' in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
assert d['a'].name == 'a'
assert (d['a'] + 1).name == 'a'
assert (d['a'] + d['b']).name is None
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == 'x'
assert ddf.index.compute().name == 'x'
def test_set_index():
    """set_index on a column label or Series resorts data and sets divisions."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
                                  index=[9, 9, 9])}
    d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
    full = d.compute()
    d2 = d.set_index('b', npartitions=3)
    assert d2.npartitions == 3
    assert d2.index.name == 'b'
    assert_eq(d2, full.set_index('b'))
    # Same result when the column is passed as a Series.
    d3 = d.set_index(d.b, npartitions=3)
    assert d3.npartitions == 3
    assert d3.index.name == 'b'
    assert_eq(d3, full.set_index(full.b))
    # Default npartitions.
    d4 = d.set_index('b')
    assert d4.index.name == 'b'
    assert_eq(d4, full.set_index('b'))
def test_set_index_interpolate():
    """Quantile-derived divisions interpolate between repeated values."""
    df = pd.DataFrame({'x': [4, 1, 1, 3, 3], 'y': [1., 1, 1, 1, 2]})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index('x', npartitions=3)
    assert d1.npartitions == 3
    assert set(d1.divisions) == set([1, 2, 3, 4])
    # Float column: interior divisions fall strictly between min and max.
    d2 = d.set_index('y', npartitions=3)
    assert d2.divisions[0] == 1.
    assert 1. < d2.divisions[1] < d2.divisions[2] < 2.
    assert d2.divisions[3] == 2.
def test_set_index_interpolate_int():
    """Divisions computed from an integer column stay integer-typed."""
    L = sorted(list(range(0, 200, 10)) * 2)
    df = pd.DataFrame({'x': 2 * L})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index('x', npartitions=10)
    assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
def test_set_index_timezone():
    """set_index preserves tz-awareness; naive vs aware comparison raises."""
    s_naive = pd.Series(pd.date_range('20130101', periods=3))
    s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
    df = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index('notz', npartitions=2)
    s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
    assert d1.divisions[0] == s_naive[0] == s1[0]
    assert d1.divisions[-1] == s_naive[2] == s1[2]
    # We currently lose "freq". Converting data with pandas-defined dtypes
    # to numpy or pure Python can be lossy like this.
    d2 = d.set_index('tz', npartitions=2)
    s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
    assert d2.divisions[0] == s2[0]
    assert d2.divisions[-1] == s2[2]
    assert d2.divisions[0].tz == s2[0].tz
    assert d2.divisions[0].tz is not None
    # Comparing tz-aware divisions with a naive-dtype index raises TypeError.
    s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
    with pytest.raises(TypeError):
        d2.divisions[0] == s2badtype[0]
@pytest.mark.parametrize(
    'npartitions',
    [1, pytest.mark.xfail(2, reason='pandas join removes freq')]
)
def test_timezone_freq(npartitions):
    """Timestamp freq survives the dask round trip (xfail for 2 partitions)."""
    s_naive = pd.Series(pd.date_range('20130101', periods=10))
    s_aware = pd.Series(pd.date_range('20130101', periods=10, tz='US/Eastern'))
    pdf = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
    ddf = dd.from_pandas(pdf, npartitions=npartitions)
    assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop(drop):
    """set_index honours drop= for string, Series, and integer column labels."""
    pdf = pd.DataFrame({'A': list('ABAABBABAA'),
                        'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        'C': [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index('A', drop=drop),
              pdf.set_index('A', drop=drop))
    assert_eq(ddf.set_index('B', drop=drop),
              pdf.set_index('B', drop=drop))
    assert_eq(ddf.set_index('C', drop=drop),
              pdf.set_index('C', drop=drop))
    assert_eq(ddf.set_index(ddf.A, drop=drop),
              pdf.set_index(pdf.A, drop=drop))
    assert_eq(ddf.set_index(ddf.B, drop=drop),
              pdf.set_index(pdf.B, drop=drop))
    assert_eq(ddf.set_index(ddf.C, drop=drop),
              pdf.set_index(pdf.C, drop=drop))
    # numeric columns
    pdf = pd.DataFrame({0: list('ABAABBABAA'),
                        1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index(0, drop=drop),
              pdf.set_index(0, drop=drop))
    assert_eq(ddf.set_index(2, drop=drop),
              pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
    """Multi-column set_index is unsupported and raises NotImplementedError."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)
    msg = r"Dask dataframe does not yet support multi-indexes"
    with tm.assertRaisesRegexp(NotImplementedError, msg):
        ddf.set_index(['a', 'b'])
def test_rename_columns():
    """Assigning to .columns renames both metadata and partitions (GH 819)."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)
    ddf.columns = ['x', 'y']
    df.columns = ['x', 'y']
    tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
    tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
    assert_eq(ddf, df)
    # Wrong-length assignment propagates pandas' ValueError.
    msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
    with tm.assertRaisesRegexp(ValueError, msg):
        ddf.columns = [1, 2, 3, 4]
    # Multi-index columns
    df = pd.DataFrame({('A', '0') : [1, 2, 2, 3], ('B', 1) : [1, 2, 3, 4]})
    ddf = dd.from_pandas(df, npartitions=2)
    df.columns = ['x', 'y']
    ddf.columns = ['x', 'y']
    tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
    tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
    assert_eq(ddf, df)
def test_rename_series():
    """Assigning to .name renames a Series and its index (GH 819)."""
    s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
    ds = dd.from_pandas(s, 2)
    s.name = 'renamed'
    ds.name = 'renamed'
    assert s.name == 'renamed'
    assert_eq(ds, s)
    ind = s.index
    dind = ds.index
    ind.name = 'renamed'
    dind.name = 'renamed'
    assert ind.name == 'renamed'
    assert_eq(dind, ind)
def test_describe():
    """describe() matches pandas; split_every only changes the graph name."""
    # prepare test case which approx quantiles will be the same as actuals
    s = pd.Series(list(range(20)) * 4)
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
    ds = dd.from_pandas(s, 4)
    ddf = dd.from_pandas(df, 4)
    assert_eq(s.describe(), ds.describe())
    assert_eq(df.describe(), ddf.describe())
    assert_eq(s.describe(), ds.describe(split_every=2))
    assert_eq(df.describe(), ddf.describe(split_every=2))
    # Different split_every values must tokenize to different graph names.
    assert ds.describe(split_every=2)._name != ds.describe()._name
    assert ddf.describe(split_every=2)._name != ddf.describe()._name
    # remove string columns
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
                       'c': list('abcd') * 20})
    ddf = dd.from_pandas(df, 4)
    assert_eq(df.describe(), ddf.describe())
    assert_eq(df.describe(), ddf.describe(split_every=2))
def test_cumulative():
    """Cumulative reductions match pandas on both axes, with and without NaNs."""
    df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    ddf = dd.from_pandas(df, 5)
    assert_eq(ddf.cumsum(), df.cumsum())
    assert_eq(ddf.cumprod(), df.cumprod())
    assert_eq(ddf.cummin(), df.cummin())
    assert_eq(ddf.cummax(), df.cummax())
    assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
    assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
    assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
    assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
    assert_eq(ddf.a.cumsum(), df.a.cumsum())
    assert_eq(ddf.a.cumprod(), df.a.cumprod())
    assert_eq(ddf.a.cummin(), df.a.cummin())
    assert_eq(ddf.a.cummax(), df.a.cummax())
    # With NaNs
    df = pd.DataFrame({'a': [1, 2, np.nan, 4, 5, 6, 7, 8],
                       'b': [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
                       'c': [np.nan] * 8})
    ddf = dd.from_pandas(df, 3)
    assert_eq(df.cumsum(), ddf.cumsum())
    assert_eq(df.cummin(), ddf.cummin())
    assert_eq(df.cummax(), ddf.cummax())
    assert_eq(df.cumprod(), ddf.cumprod())
    assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
    assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
    assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
    assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
    assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
    assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
    assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
    assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
    assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
    assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
    assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
    assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
def test_dropna():
    """dropna matches pandas for Series, DataFrame, how= and subset= options."""
    df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
                       'y': [1, 2, np.nan, 4, np.nan, np.nan],
                       'z': [1, 2, 3, 4, np.nan, np.nan]},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(df, 3)
    assert_eq(ddf.x.dropna(), df.x.dropna())
    assert_eq(ddf.y.dropna(), df.y.dropna())
    assert_eq(ddf.z.dropna(), df.z.dropna())
    assert_eq(ddf.dropna(), df.dropna())
    assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
    assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
    assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
    assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
              df.dropna(subset=['y', 'z'], how='all'))
@pytest.mark.parametrize('lower, upper', [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
    """clip / clip_lower / clip_upper match pandas for int and float bounds."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf = dd.from_pandas(df, 3)
    s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
    ds = dd.from_pandas(s, 3)
    assert_eq(ddf.clip(lower=lower, upper=upper),
              df.clip(lower=lower, upper=upper))
    assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
    assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
    assert_eq(ds.clip(lower=lower, upper=upper),
              s.clip(lower=lower, upper=upper))
    assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
    assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
    assert_eq(ddf.clip_lower(lower), df.clip_lower(lower))
    assert_eq(ddf.clip_lower(upper), df.clip_lower(upper))
    assert_eq(ddf.clip_upper(lower), df.clip_upper(lower))
    assert_eq(ddf.clip_upper(upper), df.clip_upper(upper))
    assert_eq(ds.clip_lower(lower), s.clip_lower(lower))
    assert_eq(ds.clip_lower(upper), s.clip_lower(upper))
    assert_eq(ds.clip_upper(lower), s.clip_upper(lower))
    assert_eq(ds.clip_upper(upper), s.clip_upper(upper))
def test_where_mask():
    """where/mask match pandas across aligned and misaligned index/columns."""
    pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf1 = dd.from_pandas(pdf1, 2)
    pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3})
    ddf2 = dd.from_pandas(pdf2, 2)
    # different index
    pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf3 = dd.from_pandas(pdf3, 2)
    pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf4 = dd.from_pandas(pdf4, 2)
    # different columns
    pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
                         'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3,
                         'd': [False] * 9,
                         'e': [True] * 9},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf6 = dd.from_pandas(pdf6, 2)
    # Each case: (dask frame, dask/pandas cond, pandas frame, pandas cond).
    cases = [(ddf1, ddf2, pdf1, pdf2),
             (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
             (ddf1, ddf4, pdf3, pdf4),
             (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
              pdf3, pdf4),
             (ddf5, ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
             # use pd.DataFrame as cond
             (ddf1, pdf2, pdf1, pdf2),
             (ddf1, pdf4, pdf3, pdf4),
             (ddf5, pdf6, pdf5, pdf6)]
    for ddf, ddcond, pdf, pdcond in cases:
        assert isinstance(ddf, dd.DataFrame)
        assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
        assert isinstance(pdf, pd.DataFrame)
        assert isinstance(pdcond, pd.DataFrame)
        assert_eq(ddf.where(ddcond), pdf.where(pdcond))
        assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
        assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
        assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
        # ToDo: Should work on pandas 0.17
        # https://github.com/pydata/pandas/pull/10283
        # assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
        # assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
        assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
        assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
        assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
        assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
    """map_partitions accepts multiple collections and scalar extras."""
    assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b),
              full.a + full.b)
    assert_eq(dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1),
              full.a + full.b + 1)
def test_map_partitions():
    """map_partitions works with/without meta and on Scalar inputs."""
    assert_eq(d.map_partitions(lambda df: df, meta=d), full)
    assert_eq(d.map_partitions(lambda df: df), full)
    result = d.map_partitions(lambda df: df.sum(axis=1))
    assert_eq(result, full.sum(axis=1))
    # A constant per partition yields one value per partition.
    assert_eq(d.map_partitions(lambda df: 1), pd.Series([1, 1, 1], dtype=np.int64),
              check_divisions=False)
    x = Scalar({('x', 0): 1}, 'x', int)
    result = dd.map_partitions(lambda x: 2, x)
    assert result.dtype in (np.int32, np.int64) and result.compute() == 2
    result = dd.map_partitions(lambda x: 4.0, x)
    assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_names():
    """Identical calls tokenize to identical graph names (deterministic)."""
    func = lambda x: x
    assert (sorted(dd.map_partitions(func, d, meta=d).dask) ==
            sorted(dd.map_partitions(func, d, meta=d).dask))
    assert (sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) ==
            sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask))
    func = lambda x, y: x
    assert (sorted(dd.map_partitions(func, d, d, meta=d).dask) ==
            sorted(dd.map_partitions(func, d, d, meta=d).dask))
def test_map_partitions_column_info():
    """Column/name/dtype metadata propagates through map_partitions."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    b = dd.map_partitions(lambda x: x, a, meta=a)
    tm.assert_index_equal(b.columns, a.columns)
    assert_eq(df, b)
    b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
    assert b.name == a.x.name
    assert_eq(df.x, b)
    b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
    assert b.name == a.x.name
    assert_eq(df.x, b)
    # Without meta, the result type/dtype is inferred.
    b = dd.map_partitions(lambda df: df.x + df.y, a)
    assert isinstance(b, dd.Series)
    assert b.dtype == 'i8'
    b = dd.map_partitions(lambda df: df.x + 1, a, meta=('x', 'i8'))
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
    assert b.dtype == 'i8'
def test_map_partitions_method_names():
    """The DataFrame.map_partitions method mirrors the module function."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    b = a.map_partitions(lambda x: x)
    assert isinstance(b, dd.DataFrame)
    tm.assert_index_equal(b.columns, a.columns)
    b = a.map_partitions(lambda df: df.x + 1)
    assert isinstance(b, dd.Series)
    assert b.dtype == 'i8'
    b = a.map_partitions(lambda df: df.x + 1, meta=('x', 'i8'))
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
    assert b.dtype == 'i8'
def test_map_partitions_keeps_kwargs_in_dict():
    """Keyword arguments are stored in the graph and affect tokenization."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    def f(s, x=1):
        return s + x
    b = a.x.map_partitions(f, x=5)
    # The kwarg must appear literally in the serialized graph.
    assert "'x': 5" in str(b.dask)
    assert_eq(df.x + 5, b)
    assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
    """drop_duplicates matches pandas; split_every changes only the name."""
    res = d.drop_duplicates()
    res2 = d.drop_duplicates(split_every=2)
    sol = full.drop_duplicates()
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert res._name != res2._name
    res = d.a.drop_duplicates()
    res2 = d.a.drop_duplicates(split_every=2)
    sol = full.a.drop_duplicates()
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert res._name != res2._name
    res = d.index.drop_duplicates()
    res2 = d.index.drop_duplicates(split_every=2)
    sol = full.index.drop_duplicates()
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert res._name != res2._name
    # keep=False is not implemented for dask collections.
    with pytest.raises(NotImplementedError):
        d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
    """drop_duplicates honours subset= and keep= combinations."""
    df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
                       'y': ['a', 'a', 'b', 'b', 'c', 'c']})
    ddf = dd.from_pandas(df, npartitions=2)
    for kwarg in [{'keep': 'first'}, {'keep': 'last'}]:
        assert_eq(df.x.drop_duplicates(**kwarg),
                  ddf.x.drop_duplicates(**kwarg))
        for ss in [['x'], 'y', ['x', 'y']]:
            assert_eq(df.drop_duplicates(subset=ss, **kwarg),
                      ddf.drop_duplicates(subset=ss, **kwarg))
def test_set_partition():
    """set_index with explicit divisions uses them verbatim."""
    d2 = d.set_index('b', divisions=[0, 2, 9])
    assert d2.divisions == (0, 2, 9)
    expected = full.set_index('b')
    assert_eq(d2, expected)
def test_set_partition_compute():
    """compute=True eagerly materializes, producing a smaller graph."""
    d2 = d.set_index('b', divisions=[0, 2, 9], compute=False)
    d3 = d.set_index('b', divisions=[0, 2, 9], compute=True)
    assert_eq(d2, d3)
    assert_eq(d2, full.set_index('b'))
    assert_eq(d3, full.set_index('b'))
    assert len(d2.dask) > len(d3.dask)
    d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
    d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
    exp = full.copy()
    exp.index = exp.b
    assert_eq(d4, d5)
    assert_eq(d4, exp)
    assert_eq(d5, exp)
    assert len(d4.dask) > len(d5.dask)
def test_get_partition():
    """get_partition(i) returns the i-th slice; out-of-range raises."""
    pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 3)
    assert ddf.divisions == (0, 4, 8, 9)
    # DataFrame
    div1 = ddf.get_partition(0)
    assert isinstance(div1, dd.DataFrame)
    assert_eq(div1, pdf.loc[0:3])
    div2 = ddf.get_partition(1)
    assert_eq(div2, pdf.loc[4:7])
    div3 = ddf.get_partition(2)
    assert_eq(div3, pdf.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf)
    # Series
    div1 = ddf.a.get_partition(0)
    assert isinstance(div1, dd.Series)
    assert_eq(div1, pdf.a.loc[0:3])
    div2 = ddf.a.get_partition(1)
    assert_eq(div2, pdf.a.loc[4:7])
    div3 = ddf.a.get_partition(2)
    assert_eq(div3, pdf.a.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf.a)
    with tm.assertRaises(ValueError):
        ddf.get_partition(-1)
    with tm.assertRaises(ValueError):
        ddf.get_partition(3)
def test_ndim():
    """DataFrame is 2-D; Series and Index are 1-D."""
    for obj, expected in ((d, 2), (d.a, 1), (d.index, 1)):
        assert obj.ndim == expected
def test_dtype():
    """Dask dtypes mirror the underlying pandas dtypes."""
    matches = d.dtypes == full.dtypes
    assert matches.all()
def test_cache():
    """cache() replaces every task with a getitem from the cached store."""
    d2 = d.cache()
    assert all(task[0] == getitem for task in d2.dask.values())
    assert_eq(d2.a, d.a)
def test_value_counts():
    """value_counts matches pandas; split_every changes only the name."""
    df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
    ddf = dd.from_pandas(df, npartitions=3)
    result = ddf.x.value_counts()
    expected = df.x.value_counts()
    assert_eq(result, expected)
    result2 = ddf.x.value_counts(split_every=2)
    assert_eq(result2, expected)
    assert result._name != result2._name
def test_unique():
    """Series.unique matches pandas (incl. NaN); split_every changes the name."""
    pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
                        'y': ['a', 'c', 'b', np.nan, 'c',
                              'b', 'a', 'd', np.nan, 'a']})
    ddf = dd.from_pandas(pdf, npartitions=3)
    assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
    assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
    assert_eq(ddf.x.unique(split_every=2),
              pd.Series(pdf.x.unique(), name='x'))
    assert_eq(ddf.y.unique(split_every=2),
              pd.Series(pdf.y.unique(), name='y'))
    assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
    """Series.isin matches pandas for both list and Series arguments."""
    values = [0, 1, 2]
    assert_eq(d.a.isin(values), full.a.isin(values))
    assert_eq(d.a.isin(pd.Series(values)),
              full.a.isin(pd.Series(values)))
def test_len():
    """len() of dask collections agrees with the pandas originals."""
    for lazy, concrete in ((d, full), (d.a, full.a)):
        assert len(lazy) == len(concrete)
def test_size():
    """.size of frame, column and index agrees with pandas."""
    for lazy, concrete in ((d, full), (d.a, full.a), (d.index, full.index)):
        assert_eq(lazy.size, concrete.size)
def test_nbytes():
    """.nbytes of column and index agrees with pandas."""
    for lazy, concrete in ((d.a, full.a), (d.index, full.index)):
        assert_eq(lazy.nbytes, concrete.nbytes)
def test_quantile():
    """Approximate quantiles return lazy results with quantile divisions."""
    # series / multiple
    result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7]) # result may different
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert_eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    # Approximate values: only bounds are checked, not exact equality.
    assert result.iloc[0] == 0
    assert 5 < result.iloc[1] < 6
    # index
    s = pd.Series(np.arange(10), index=np.arange(10))
    ds = dd.from_pandas(s, 2)
    result = ds.index.quantile([.3, .7])
    exp = s.quantile([.3, .7])
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert_eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    assert 1 < result.iloc[0] < 2
    assert 7 < result.iloc[1] < 8
    # series / single
    result = d.b.quantile(.5)
    exp = full.b.quantile(.5) # result may different
    assert isinstance(result, dd.core.Scalar)
    result = result.compute()
    assert 4 < result < 6
def test_empty_quantile():
    """quantile([]) yields an empty result with None divisions."""
    result = d.b.quantile([])
    exp = full.b.quantile([])
    assert result.divisions == (None, None)
    # because of a pandas bug, name is not preserved
    # https://github.com/pydata/pandas/pull/10881
    assert result.name == 'b'
    assert result.compute().name == 'b'
    assert_eq(result, exp, check_names=False)
def test_dataframe_quantile():
    """DataFrame.quantile keeps column order, drops non-numeric columns."""
    # column X is for test column order and result division
    df = pd.DataFrame({'A': np.arange(20),
                       'X': np.arange(20, 40),
                       'B': np.arange(10, 30),
                       'C': ['a', 'b', 'c', 'd'] * 5},
                      columns=['A', 'X', 'B', 'C'])
    ddf = dd.from_pandas(df, 3)
    result = ddf.quantile()
    assert result.npartitions == 1
    assert result.divisions == ('A', 'X')
    result = result.compute()
    assert isinstance(result, pd.Series)
    tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
    assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
    assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
    result = ddf.quantile([0.25, 0.75])
    assert result.npartitions == 1
    assert result.divisions == (0.25, 0.75)
    result = result.compute()
    assert isinstance(result, pd.DataFrame)
    tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
    tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
    minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result > minexp).all().all()
    maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result < maxexp).all().all()
    assert_eq(ddf.quantile(axis=1), df.quantile(axis=1))
    # Multiple quantiles along axis=1 are unsupported.
    pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
    """The dask index evaluates to the pandas index."""
    result = d.index
    assert_eq(result, full.index)
def test_assign():
    """assign supports scalars, strings, reductions and series columns."""
    d_unknown = dd.from_pandas(full, npartitions=3, sort=False)
    assert not d_unknown.known_divisions
    res = d.assign(c=1,
                   d='string',
                   e=d.a.sum(),
                   f=d.a + d.b)
    res_unknown = d_unknown.assign(c=1,
                                   d='string',
                                   e=d_unknown.a.sum(),
                                   f=d_unknown.a + d_unknown.b)
    sol = full.assign(c=1,
                      d='string',
                      e=full.a.sum(),
                      f=full.a + full.b)
    assert_eq(res, sol)
    assert_eq(res_unknown, sol)
    res = d.assign(c=full.a + 1)
    assert_eq(res, full.assign(c=full.a + 1))
    # divisions unknown won't work with pandas
    with pytest.raises(ValueError):
        d_unknown.assign(c=full.a + 1)
    # unsupported type
    with pytest.raises(TypeError):
        d.assign(c=list(range(9)))
    # Fails when assigning known divisions to unknown divisions
    with pytest.raises(ValueError):
        d_unknown.assign(foo=d.a)
    # Fails when assigning unknown divisions to known divisions
    with pytest.raises(ValueError):
        d.assign(foo=d_unknown.a)
def test_map():
    """Series.map matches pandas for callables, dicts and Series lookups."""
    assert_eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
    lk = dict((v, v + 1) for v in full.a.values)
    assert_eq(d.a.map(lk), full.a.map(lk))
    assert_eq(d.b.map(lk), full.b.map(lk))
    lk = pd.Series(lk)
    assert_eq(d.a.map(lk), full.a.map(lk))
    assert_eq(d.b.map(lk), full.b.map(lk))
    assert_eq(d.b.map(lk, meta=d.b), full.b.map(lk))
    assert_eq(d.b.map(lk, meta=('b', 'i8')), full.b.map(lk))
    # Mapping with a dask Series is unsupported.
    pytest.raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
    """_concat of empty frames keeps columns and yields zero rows."""
    frames = [pd.DataFrame(columns=['a', 'b']) for _ in range(2)]
    combined = _concat(frames)
    assert list(combined.columns) == ['a', 'b']
    assert len(combined) == 0
def test_args():
    """Collections can be reconstructed from their internal _args tuples."""
    e = d.assign(c=d.a + 1)
    f = type(e)(*e._args)
    assert_eq(e, f)
    assert_eq(d.a, type(d.a)(*d.a._args))
    assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
    """known_divisions is True for d, False when all divisions are None."""
    assert d.known_divisions
    unknown = dd.DataFrame(dsk, 'x', meta, divisions=[None, None, None])
    assert not unknown.known_divisions
def test_unknown_divisions():
    """Element-wise ops and reductions work without known divisions."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    meta = make_meta({'a': 'i8', 'b': 'i8'})
    d = dd.DataFrame(dsk, 'x', meta, [None, None, None, None])
    full = d.compute(get=dask.get)
    assert_eq(d.a.sum(), full.a.sum())
    assert_eq(d.a + d.b + 1, full.a + full.b + 1)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align(join):
    """align matches pandas for all join types, with and without fill_value."""
    df1a = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10)},
                        index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
    df1b = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10)},
                        index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
    ddf1a = dd.from_pandas(df1a, 3)
    ddf1b = dd.from_pandas(df1b, 3)
    # DataFrame
    res1, res2 = ddf1a.align(ddf1b, join=join)
    exp1, exp2 = df1a.align(df1b, join=join)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # Series
    res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join)
    exp1, exp2 = df1a['A'].align(df1b['B'], join=join)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # DataFrame with fill_value
    res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
    exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # Series
    res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join, fill_value=1)
    exp1, exp2 = df1a['A'].align(df1b['B'], join=join, fill_value=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align_axis(join):
    """align accepts axis as int or name; invalid values raise ValueError."""
    df1a = pd.DataFrame({'A': np.random.randn(10),
                         'B': np.random.randn(10),
                         'C': np.random.randn(10)},
                        index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
    df1b = pd.DataFrame({'B': np.random.randn(10),
                         'C': np.random.randn(10),
                         'D': np.random.randn(10)},
                        index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
    ddf1a = dd.from_pandas(df1a, 3)
    ddf1b = dd.from_pandas(df1b, 3)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
    exp1, exp2 = df1a.align(df1b, join=join, axis=0)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
    exp1, exp2 = df1a.align(df1b, join=join, axis=1)
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis='index')
    exp1, exp2 = df1a.align(df1b, join=join, axis='index')
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    res1, res2 = ddf1a.align(ddf1b, join=join, axis='columns')
    exp1, exp2 = df1a.align(df1b, join=join, axis='columns')
    assert assert_eq(res1, exp1)
    assert assert_eq(res2, exp2)
    # invalid
    with tm.assertRaises(ValueError):
        ddf1a.align(ddf1b, join=join, axis='XXX')
    # Series alignment has no column axis.
    with tm.assertRaises(ValueError):
        ddf1a['A'].align(ddf1b['B'], join=join, axis=1)
def test_combine():
    """combine matches pandas for frames/series, fill_value and overwrite."""
    df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
                        'B': np.random.choice(['a', 'b', np.nan], 100)})
    df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
                        'B': np.random.choice(['a', 'b', 'c'], 100)})
    ddf1 = dd.from_pandas(df1, 4)
    ddf2 = dd.from_pandas(df2, 5)
    first = lambda a, b: a
    # DataFrame
    for da, db, a, b in [(ddf1, ddf2, df1, df2),
                         (ddf1.A, ddf2.A, df1.A, df2.A),
                         (ddf1.B, ddf2.B, df1.B, df2.B)]:
        for func, fill_value in [(add, None), (add, 100), (first, None)]:
            sol = a.combine(b, func, fill_value=fill_value)
            assert_eq(da.combine(db, func, fill_value=fill_value), sol)
            assert_eq(da.combine(b, func, fill_value=fill_value), sol)
    assert_eq(ddf1.combine(ddf2, add, overwrite=False),
              df1.combine(df2, add, overwrite=False))
    # Identical calls tokenize to the same graph name.
    assert da.combine(db, add)._name == da.combine(db, add)._name
def test_combine_first():
    """combine_first matches pandas for dask and pandas 'other' operands."""
    df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
                        'B': np.random.choice(['a', 'b', np.nan], 100)})
    df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
                        'B': np.random.choice(['a', 'b', 'c'], 100)})
    ddf1 = dd.from_pandas(df1, 4)
    ddf2 = dd.from_pandas(df2, 5)
    # DataFrame
    assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))
    assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))
    # Series
    assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))
    assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))
    assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))
    assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))
def test_dataframe_picklable():
    """Frames, series, indexes and scalars survive (cloud)pickle round trips."""
    from pickle import loads, dumps
    cloudpickle = pytest.importorskip('cloudpickle')
    cp_dumps = cloudpickle.dumps
    d = tm.makeTimeDataFrame()
    df = dd.from_pandas(d, npartitions=3)
    df = df + 2
    # dataframe
    df2 = loads(dumps(df))
    assert_eq(df, df2)
    df2 = loads(cp_dumps(df))
    assert_eq(df, df2)
    # series
    a2 = loads(dumps(df.A))
    assert_eq(df.A, a2)
    a2 = loads(cp_dumps(df.A))
    assert_eq(df.A, a2)
    # index
    i2 = loads(dumps(df.index))
    assert_eq(df.index, i2)
    i2 = loads(cp_dumps(df.index))
    assert_eq(df.index, i2)
    # scalar
    # lambdas are present, so only test cloudpickle
    s = df.A.sum()
    s2 = loads(cp_dumps(s))
    assert_eq(s, s2)
def test_random_partitions():
    """random_split is deterministic per seed and splits the full length."""
    a, b = d.random_split([0.5, 0.5], 42)
    assert isinstance(a, dd.DataFrame)
    assert isinstance(b, dd.DataFrame)
    assert a._name != b._name
    assert len(a.compute()) + len(b.compute()) == len(full)
    # Same seed reproduces the same graph names.
    a2, b2 = d.random_split([0.5, 0.5], 42)
    assert a2._name == a._name
    assert b2._name == b._name
    parts = d.random_split([0.4, 0.5, 0.1], 42)
    names = set([p._name for p in parts])
    names.update([a._name, b._name])
    assert len(names) == 5
    # Fractions must sum to 1.
    with pytest.raises(ValueError):
        d.random_split([0.4, 0.5], 42)
def test_series_round():
    """Series.round matches pandas."""
    ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name='a')
    s = dd.from_pandas(ps, npartitions=3)
    assert_eq(s.round(), ps.round())
def test_set_partition_2():
    """set_index with string divisions sorts the index as expected."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
    ddf = dd.from_pandas(df, 2)
    result = ddf.set_index('y', divisions=['a', 'c', 'd'])
    assert result.divisions == ('a', 'c', 'd')
    assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith('repartition-split')]
keys = sorted(keys)
sp = pd.concat([d._get(d.dask, k) for k in keys])
assert_eq(orig, sp)
assert_eq(orig, d)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert_eq(a, b)
assert_eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])
for div in [[20, 60], [10, 50], [1], # first / last element mismatch
[0, 60], [10, 70], # do not allow to expand divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60]]: # not unique (last element can be duplicated)
pytest.raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
[0, 2, 6], [0, 2, 6, 6],
[0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list('abcdefghij'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [list('aj'), list('ajj'), list('adj'),
list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
list('abdefgij'), list('abcdefghij')]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
def test_repartition_divisions():
    """Check the raw task graph emitted by ``repartition_divisions``.

    ``'a'`` is the input layer name, ``'b'`` the intermediate layer of
    boundary-sliced pieces, and ``'c'`` the output layer, which either
    aliases a single piece or concatenates several with ``pd.concat``.
    """
    result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
    assert result == {('b', 0): (boundary_slice, ('a', 0), 0, 6, False),
                      ('b', 1): (boundary_slice, ('a', 0), 6, 6, True),
                      ('c', 0): ('b', 0),
                      ('c', 1): ('b', 1)}

    result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
    assert result == {('b', 0): (boundary_slice, ('a', 0), 1, 3, False),
                      ('b', 1): (boundary_slice, ('a', 1), 3, 4, False),
                      ('b', 2): (boundary_slice, ('a', 1), 4, 6, False),
                      ('b', 3): (boundary_slice, ('a', 1), 6, 7, True),
                      # output partition [1, 4) spans two input partitions,
                      # so its pieces must be concatenated
                      ('c', 0): (pd.concat, [('b', 0), ('b', 1)]),
                      ('c', 1): ('b', 2),
                      ('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
    """``dd.repartition`` accepts plain pandas objects as input."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    divisions = [10, 20, 50, 60]

    # A pandas DataFrame comes back as a dask DataFrame.
    result = dd.repartition(pdf, divisions=divisions)
    assert isinstance(result, dd.DataFrame)
    assert result.divisions == tuple(divisions)
    assert_eq(result, pdf)

    # A pandas Series comes back as a dask Series.
    result = dd.repartition(pdf.y, divisions=divisions)
    assert isinstance(result, dd.Series)
    assert result.divisions == tuple(divisions)
    assert_eq(result, pdf.y)
@pytest.mark.parametrize('use_index', [True, False])
@pytest.mark.parametrize('n', [1, 2, 4, 5])
@pytest.mark.parametrize('k', [1, 2, 4, 5])
@pytest.mark.parametrize('dtype', [int, float, 'M8[ns]'])
@pytest.mark.parametrize('transform', [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
    """Repartitioning from n to k partitions preserves the data, hits the
    requested partition count, and leaves no empty partitions — for both
    frames and series, over several index dtypes."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10,
                       'y': list('abdabd') * 10},
                      index=pd.Series([10, 20, 30, 40, 50, 60] * 10, dtype=dtype))
    df = transform(df)
    a = dd.from_pandas(df, npartitions=n, sort=use_index)
    b = a.repartition(npartitions=k)
    assert_eq(a, b)
    assert b.npartitions == k
    # materialize every output partition and check each holds data
    parts = dask.get(b.dask, b._keys())
    assert all(map(len, parts))
def test_repartition_object_index():
    """Repartitioning by npartitions works over a string index, but an
    unsorted object index yields no usable divisions."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10},
                       index=list('abdabd') * 10)
    ddf = dd.from_pandas(pdf, npartitions=5)

    coarser = ddf.repartition(npartitions=2)
    assert coarser.npartitions == 2
    assert_eq(coarser, pdf)

    finer = ddf.repartition(npartitions=10)
    assert finer.npartitions == 10
    assert_eq(finer, pdf)
    # divisions cannot be inferred for an unsorted object index
    assert not finer.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize('npartitions', [1, 20, 243])
@pytest.mark.parametrize('freq', ['1D', '7D', '28h'])
@pytest.mark.parametrize('end', ['2000-04-15', '2000-04-15 12:37:01'])
@pytest.mark.parametrize('start', ['2000-01-01', '2000-01-01 12:30:00'])
def test_repartition_freq(npartitions, freq, start, end):
    """repartition(freq=...) preserves all data across many combinations of
    source partitioning, target period, and index span."""
    start = pd.Timestamp(start)
    end = pd.Timestamp(end)
    # one row per minute over [start, end]
    ind = pd.DatetimeIndex(start=start, end=end, freq='60s')
    df = pd.DataFrame({'x': np.arange(len(ind))}, index=ind)
    ddf = dd.from_pandas(df, npartitions=npartitions, name='x')

    ddf2 = ddf.repartition(freq=freq)
    assert_eq(ddf2, df)
def test_repartition_freq_divisions():
    """repartition(freq=...) rounds interior divisions onto the frequency
    grid while the endpoints still cover the full index range."""
    df = pd.DataFrame({'x': np.random.random(10)},
                      index=pd.DatetimeIndex(np.random.random(10) * 100e9))
    ddf = dd.from_pandas(df, npartitions=3)

    ddf2 = ddf.repartition(freq='15s')
    # every interior division lands on a 15-second boundary
    for div in ddf2.divisions[1:-1]:
        assert div == div.round('15s')
    # the outer divisions still span the actual data
    assert ddf2.divisions[0] == df.index.min()
    assert ddf2.divisions[-1] == df.index.max()
    # BUG FIX: the original asserted `assert_eq(ddf2, ddf2)`, comparing the
    # result with itself — a vacuous check. Compare against the source data.
    assert_eq(ddf2, df)
def test_repartition_freq_errors():
    """repartition(freq=...) on a non-datetime index raises a TypeError
    whose message mentions the timeseries restriction."""
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3]}), npartitions=1)
    with pytest.raises(TypeError) as info:
        ddf.repartition(freq='1s')
    message = str(info.value)
    assert 'only' in message
    assert 'timeseries' in message
def test_embarrassingly_parallel_operations():
    """Partitionwise operations (astype, dropna, between, clip, null checks,
    sample) match their pandas equivalents."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)

    assert_eq(a.x.astype('float32'), df.x.astype('float32'))
    assert a.x.astype('float32').compute().dtype == 'float32'

    assert_eq(a.x.dropna(), df.x.dropna())

    assert_eq(a.x.between(2, 4), df.x.between(2, 4))

    assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))

    assert_eq(a.x.notnull(), df.x.notnull())
    assert_eq(a.x.isnull(), df.x.isnull())
    assert_eq(a.notnull(), df.notnull())
    assert_eq(a.isnull(), df.isnull())

    # sampling half the rows should return strictly fewer rows than the input
    assert len(a.sample(0.5).compute()) < len(df)
def test_fillna():
    """fillna with scalar values, pad/bfill methods, limits, and axis=1
    matches pandas; unsupported combinations raise."""
    df = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(df, npartitions=5, sort=False)

    assert_eq(ddf.fillna(100), df.fillna(100))
    assert_eq(ddf.A.fillna(100), df.A.fillna(100))

    assert_eq(ddf.fillna(method='pad'), df.fillna(method='pad'))
    assert_eq(ddf.A.fillna(method='pad'), df.A.fillna(method='pad'))

    assert_eq(ddf.fillna(method='bfill'), df.fillna(method='bfill'))
    assert_eq(ddf.A.fillna(method='bfill'), df.A.fillna(method='bfill'))

    # method + limit, on frames and a single column
    assert_eq(ddf.fillna(method='pad', limit=2),
              df.fillna(method='pad', limit=2))
    assert_eq(ddf.A.fillna(method='pad', limit=2),
              df.A.fillna(method='pad', limit=2))

    assert_eq(ddf.fillna(method='bfill', limit=2),
              df.fillna(method='bfill', limit=2))
    assert_eq(ddf.A.fillna(method='bfill', limit=2),
              df.A.fillna(method='bfill', limit=2))

    # axis=1 fills along rows, so it is embarrassingly parallel
    assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
    assert_eq(ddf.fillna(method='pad', axis=1), df.fillna(method='pad', axis=1))
    assert_eq(ddf.fillna(method='pad', limit=2, axis=1),
              df.fillna(method='pad', limit=2, axis=1))

    # unsupported: axis=1 on a Series, and a bare limit without a method
    pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
    pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
    pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))

    # with mostly-missing data, an unlimited pad needs cross-partition state
    # and should fail, while a limited pad stays computable
    df = tm.makeMissingDataframe(0.2, 42)
    ddf = dd.from_pandas(df, npartitions=5, sort=False)
    pytest.raises(ValueError, lambda: ddf.fillna(method='pad').compute())
    assert_eq(df.fillna(method='pad', limit=3),
              ddf.fillna(method='pad', limit=3))
def test_fillna_multi_dataframe():
    """Filling one dask Series with another dask Series matches pandas."""
    pdf = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(pdf, npartitions=5, sort=False)
    assert_eq(ddf.A.fillna(ddf.B), pdf.A.fillna(pdf.B))
    assert_eq(ddf.B.fillna(ddf.A), pdf.B.fillna(pdf.A))
def test_ffill_bfill():
    """The ffill/bfill shortcuts agree with pandas along both axes."""
    pdf = tm.makeMissingDataframe(0.8, 42)
    ddf = dd.from_pandas(pdf, npartitions=5, sort=False)
    # default axis (down the columns)
    assert_eq(ddf.ffill(), pdf.ffill())
    assert_eq(ddf.bfill(), pdf.bfill())
    # across the rows
    assert_eq(ddf.ffill(axis=1), pdf.ffill(axis=1))
    assert_eq(ddf.bfill(axis=1), pdf.bfill(axis=1))
def test_sample():
    """sample is self-consistent, reproducible under a fixed random_state,
    and tokenizes to a fresh name otherwise."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)

    # the same object compares equal to itself across computations
    b = a.sample(0.5)
    assert_eq(b, b)

    # a fixed random_state reproduces the same sample
    c = a.sample(0.5, random_state=1234)
    d = a.sample(0.5, random_state=1234)
    assert_eq(c, d)

    # without a seed each call must get a distinct graph key
    assert a.sample(0.5)._name != a.sample(0.5)._name
def test_sample_without_replacement():
    """With replace=False, every sampled index label is distinct."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                       index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(pdf, 2)
    sampled_index = ddf.sample(0.7, replace=False).index.compute()
    assert len(sampled_index) == len(set(sampled_index))
def test_datetime_accessor():
    """The .dt accessor forwards datetime attributes/methods and tokenizes
    deterministically."""
    df = pd.DataFrame({'x': [1, 2, 3, 4]})
    df['x'] = df.x.astype('M8[us]')

    a = dd.from_pandas(df, 2)

    assert 'date' in dir(a.x.dt)

    # pandas loses Series.name via datetime accessor
    # see https://github.com/pydata/pandas/issues/10712
    assert_eq(a.x.dt.date, df.x.dt.date, check_names=False)

    # to_pydatetime returns a numpy array in pandas, but a Series in dask
    assert_eq(a.x.dt.to_pydatetime(),
              pd.Series(df.x.dt.to_pydatetime(), index=df.index, dtype=object))

    # rebuilding the same expression must yield identical graph keys
    assert set(a.x.dt.date.dask) == set(a.x.dt.date.dask)
    assert set(a.x.dt.to_pydatetime().dask) == set(a.x.dt.to_pydatetime().dask)
def test_str_accessor():
    """The .str accessor exists only on object columns, forwards args and
    kwargs, works on the index, and tokenizes deterministically."""
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'D'], 'y': [1, 2, 3, 4]},
                      index=['e', 'f', 'g', 'H'])

    a = dd.from_pandas(df, 2, sort=False)

    # Check that str not in dir/hasattr for non-object columns
    assert 'str' not in dir(a.y)
    assert not hasattr(a.y, 'str')

    assert 'upper' in dir(a.x.str)
    assert_eq(a.x.str.upper(), df.x.str.upper())
    # identical expressions must produce identical graph keys
    assert set(a.x.str.upper().dask) == set(a.x.str.upper().dask)

    assert 'upper' in dir(a.index.str)
    assert_eq(a.index.str.upper(), df.index.str.upper())
    assert set(a.index.str.upper().dask) == set(a.index.str.upper().dask)

    # make sure to pass thru args & kwargs
    assert 'contains' in dir(a.x.str)
    assert_eq(a.x.str.contains('a'), df.x.str.contains('a'))
    assert set(a.x.str.contains('a').dask) == set(a.x.str.contains('a').dask)

    assert_eq(a.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
    assert set(a.x.str.contains('d', case=False).dask) == set(a.x.str.contains('d', case=False).dask)

    for na in [True, False]:
        assert_eq(a.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
        assert set(a.x.str.contains('a', na=na).dask) == set(a.x.str.contains('a', na=na).dask)

    for regex in [True, False]:
        assert_eq(a.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
        assert set(a.x.str.contains('a', regex=regex).dask) == set(a.x.str.contains('a', regex=regex).dask)
def test_empty_max():
    """Reductions must tolerate empty partitions (second partition is empty)."""
    meta = make_meta({'x': 'i8'})
    a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
                      ('x', 1): pd.DataFrame({'x': []})}, 'x',
                     meta, [None, None, None])
    assert_eq(a.x.max(), 1)
def test_query():
    """DataFrame.query matches pandas when its backend is importable."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    result = ddf.query('x**2 > y')
    # pandas.query may require an optional dependency; skip if missing
    with ignoring(ImportError):
        assert_eq(result, pdf.query('x**2 > y'))
def test_eval():
    """eval matches pandas; in-place assignment is unsupported in dask."""
    p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    d = dd.from_pandas(p, npartitions=2)
    # pandas.eval may require an optional backend; skip if missing
    with ignoring(ImportError):
        assert_eq(p.eval('x + y'), d.eval('x + y'))
        assert_eq(p.eval('z = x + y', inplace=False),
                  d.eval('z = x + y', inplace=False))
        with pytest.raises(NotImplementedError):
            d.eval('z = x + y', inplace=True)
        # some pandas versions treat inplace=None as in-place; if so, dask
        # must also refuse it
        if p.eval('z = x + y', inplace=None) is None:
            with pytest.raises(NotImplementedError):
                d.eval('z = x + y', inplace=None)
@pytest.mark.parametrize('include, exclude', [
    ([int], None),
    (None, [int]),
    ([np.number, object], [float]),
    (['datetime'], None)
])
def test_select_dtypes(include, exclude):
    """select_dtypes matches pandas, and dtype/ftype counts agree both for
    the source frame and for the selected subset."""
    n = 10
    df = pd.DataFrame({'cint': [1] * n,
                       'cstr': ['a'] * n,
                       'clfoat': [1.] * n,
                       'cdt': pd.date_range('2016-01-01', periods=n)})
    a = dd.from_pandas(df, npartitions=2)
    result = a.select_dtypes(include=include, exclude=exclude)
    expected = df.select_dtypes(include=include, exclude=exclude)
    assert_eq(result, expected)

    # count dtypes
    tm.assert_series_equal(a.get_dtype_counts(), df.get_dtype_counts())
    tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())

    tm.assert_series_equal(result.get_dtype_counts(),
                           expected.get_dtype_counts())
    tm.assert_series_equal(result.get_ftype_counts(),
                           expected.get_ftype_counts())
def test_deterministic_apply_concat_apply_names():
    """apply_concat_apply (aca) must tokenize deterministically: identical
    calls share graph keys, different functions or kwargs do not."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
    assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
    assert (sorted(a.x.drop_duplicates().dask) ==
            sorted(a.x.drop_duplicates().dask))
    assert (sorted(a.groupby('x').y.mean().dask) ==
            sorted(a.groupby('x').y.mean().dask))

    # Test aca without passing in token string
    f = lambda a: a.nlargest(5)
    f2 = lambda a: a.nlargest(3)
    assert (sorted(aca(a.x, f, f, a.x._meta).dask) !=
            sorted(aca(a.x, f2, f2, a.x._meta).dask))
    assert (sorted(aca(a.x, f, f, a.x._meta).dask) ==
            sorted(aca(a.x, f, f, a.x._meta).dask))

    # Test aca with keywords
    def chunk(x, c_key=0, both_key=0):
        return x.sum() + c_key + both_key

    def agg(x, a_key=0, both_key=0):
        return pd.Series(x).sum() + a_key + both_key

    c_key = 2
    a_key = 3
    both_key = 4

    res = aca(a.x, chunk=chunk, aggregate=agg, chunk_kwargs={'c_key': c_key},
              aggregate_kwargs={'a_key': a_key}, both_key=both_key)
    # identical kwargs -> identical keys; changing one kwarg changes keys
    assert (sorted(res.dask) ==
            sorted(aca(a.x, chunk=chunk, aggregate=agg,
                       chunk_kwargs={'c_key': c_key},
                       aggregate_kwargs={'a_key': a_key},
                       both_key=both_key).dask))
    assert (sorted(res.dask) !=
            sorted(aca(a.x, chunk=chunk, aggregate=agg,
                       chunk_kwargs={'c_key': c_key},
                       aggregate_kwargs={'a_key': a_key},
                       both_key=0).dask))

    # both_key reaches chunk and aggregate, c_key each of 2 chunks
    assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
    """aca infers output metadata from the chunk/aggregate functions."""
    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': [5, 6, 7, 8]})
    ddf = dd.from_pandas(df, npartitions=2)

    def chunk(x, y, constant=1.0):
        return (x + y + constant).head()

    def agg(x):
        return x.head()

    res = aca([ddf, 2.0], chunk=chunk, aggregate=agg,
              chunk_kwargs=dict(constant=2.0))
    sol = (df + 2.0 + 2.0).head()
    assert_eq(res, sol)

    # Should infer as a scalar
    res = aca([ddf.x], chunk=lambda x: pd.Series([x.sum()]),
              aggregate=lambda x: x.sum())
    assert isinstance(res, Scalar)
    assert res.compute() == df.x.sum()
def test_aca_split_every():
    """aca with split_every builds a tree reduction: fan-in is bounded,
    keywords route to the right stage, and invalid arguments raise.

    The arithmetic below relies on 15 chunks reducing in a depth-limited
    tree; e.g. with split_every=3 there are 7 combine nodes.
    """
    df = pd.DataFrame({'x': [1] * 60})
    ddf = dd.from_pandas(df, npartitions=15)

    def chunk(x, y, constant=0):
        return x.sum() + y + constant

    def combine(x, constant=0):
        return x.sum() + constant + 1

    def agg(x, constant=0):
        return x.sum() + constant + 2

    f = lambda n: aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
                      chunk_kwargs=dict(constant=1.0),
                      combine_kwargs=dict(constant=2.0),
                      aggregate_kwargs=dict(constant=3.0),
                      split_every=n)

    assert_max_deps(f(3), 3)
    assert_max_deps(f(4), 4, False)
    assert_max_deps(f(5), 5)
    # split_every >= npartitions collapses to a single aggregate step
    assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())

    r3 = f(3)
    r4 = f(4)
    assert r3._name != r4._name
    # Only intersect on reading operations
    assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())

    # Keywords are different for each step
    assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)

    # Keywords are same for each step
    res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
              constant=3.0, split_every=3)
    assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)

    # No combine provided, combine is agg
    res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
    assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)

    # split_every must be >= 2
    with pytest.raises(ValueError):
        f(1)

    # combine_kwargs with no combine provided
    with pytest.raises(ValueError):
        aca([ddf, 2.0], chunk=chunk, aggregate=agg, split_every=3,
            chunk_kwargs=dict(constant=1.0),
            combine_kwargs=dict(constant=2.0),
            aggregate_kwargs=dict(constant=3.0))
def test_reduction_method():
    """Custom reductions via .reduction() for chunks returning a scalar,
    a series, or a dataframe; names must be deterministic per kwargs."""
    df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
    ddf = dd.from_pandas(df, npartitions=4)

    chunk = lambda x, val=0: (x >= val).sum()
    agg = lambda x: x.sum()

    # Output of chunk is a scalar
    res = ddf.x.reduction(chunk, aggregate=agg)
    assert_eq(res, df.x.count())

    # Output of chunk is a series
    res = ddf.reduction(chunk, aggregate=agg)
    assert res._name == ddf.reduction(chunk, aggregate=agg)._name
    assert_eq(res, df.count())

    # Test with keywords
    res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})
    # BUG FIX: the original line lacked `assert`, so this determinism
    # check was evaluated and silently discarded.
    assert res2._name == ddf.reduction(chunk, aggregate=agg,
                                       chunk_kwargs={'val': 25})._name
    assert res2._name != res._name
    assert_eq(res2, (df >= 25).sum())

    # Output of chunk is a dataframe
    def sum_and_count(x):
        return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
    res = ddf.reduction(sum_and_count,
                        aggregate=lambda x: x.groupby(level=0).sum())

    assert_eq(res, pd.DataFrame({'sum': df.sum(), 'count': df.count()}))
def test_reduction_method_split_every():
    """.reduction() with split_every builds a bounded-fan-in tree and routes
    stage-specific keywords correctly; invalid arguments raise.

    The arithmetic relies on 15 chunks reducing in a depth-limited tree;
    e.g. with split_every=3 there are 7 combine nodes.
    """
    df = pd.Series([1] * 60)
    ddf = dd.from_pandas(df, npartitions=15)

    def chunk(x, constant=0):
        return x.sum() + constant

    def combine(x, constant=0):
        return x.sum() + constant + 1

    def agg(x, constant=0):
        return x.sum() + constant + 2

    f = lambda n: ddf.reduction(chunk, aggregate=agg, combine=combine,
                                chunk_kwargs=dict(constant=1.0),
                                combine_kwargs=dict(constant=2.0),
                                aggregate_kwargs=dict(constant=3.0),
                                split_every=n)

    assert_max_deps(f(3), 3)
    assert_max_deps(f(4), 4, False)
    assert_max_deps(f(5), 5)
    # split_every >= npartitions collapses to a single aggregate step
    assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())

    r3 = f(3)
    r4 = f(4)
    assert r3._name != r4._name
    # Only intersect on reading operations
    assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())

    # Keywords are different for each step
    assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)

    # Keywords are same for each step
    res = ddf.reduction(chunk, aggregate=agg, combine=combine, constant=3.0,
                        split_every=3)
    assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)

    # No combine provided, combine is agg
    res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
    assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)

    # split_every must be >= 2
    with pytest.raises(ValueError):
        f(1)

    # combine_kwargs with no combine provided
    with pytest.raises(ValueError):
        ddf.reduction(chunk, aggregate=agg, split_every=3,
                      chunk_kwargs=dict(constant=1.0),
                      combine_kwargs=dict(constant=2.0),
                      aggregate_kwargs=dict(constant=3.0))
def test_pipe():
    """DataFrame/Series.pipe forwards positional and keyword arguments."""
    pdf = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
    ddf = dd.from_pandas(pdf, npartitions=4)

    def add(x, y, z=0):
        return x + y + z

    assert_eq(ddf.pipe(add, 1, z=2), add(ddf, 1, z=2))
    assert_eq(ddf.x.pipe(add, 1, z=2), add(ddf.x, 1, z=2))
def test_gh_517():
    """Index.nunique counts distinct labels across partitions (GH-517)."""
    values = np.random.randn(100, 2)
    pdf = pd.DataFrame(values, columns=['a', 'b'])

    ddf = dd.from_pandas(pdf, 2)
    assert ddf.index.nunique().compute() == 100

    # concatenating the frame with itself duplicates labels, so the
    # distinct count is unchanged even with more partitions
    doubled = dd.from_pandas(pd.concat([pdf, pdf]), 5)
    assert doubled.index.nunique().compute() == 100
def test_drop_axis_1():
    """drop(axis=1) matches pandas; missing labels raise unless
    errors='ignore'."""
    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': [5, 6, 7, 8],
                       'z': [9, 10, 11, 12]})
    ddf = dd.from_pandas(df, npartitions=2)

    assert_eq(ddf.drop('y', axis=1), df.drop('y', axis=1))
    assert_eq(ddf.drop(['y', 'z'], axis=1), df.drop(['y', 'z'], axis=1))
    # 'a' is not a column, so dropping it errors by default
    with pytest.raises(ValueError):
        ddf.drop(['a', 'x'], axis=1)
    assert_eq(ddf.drop(['a', 'x'], axis=1, errors='ignore'),
              df.drop(['a', 'x'], axis=1, errors='ignore'))
def test_gh580():
    """NumPy ufuncs apply to dask Series (GH-580)."""
    df = pd.DataFrame({'x': np.arange(10, dtype=float)})
    ddf = dd.from_pandas(df, 2)
    assert_eq(np.cos(df['x']), np.cos(ddf['x']))
    # repeated on purpose — presumably guards a regression where a second
    # ufunc application on the same object failed; confirm against GH-580
    assert_eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
    """rename(columns=dict) matches pandas.

    `d`/`full` are module-level dask/pandas fixtures defined elsewhere
    in this file.
    """
    renamer = {'a': 'A', 'b': 'B'}
    assert_eq(d.rename(columns=renamer),
              full.rename(columns=renamer))
def test_rename_function():
    """rename(columns=callable) matches pandas (d/full: module fixtures)."""
    renamer = lambda x: x.upper()
    assert_eq(d.rename(columns=renamer),
              full.rename(columns=renamer))
def test_rename_index():
    """Renaming index labels is rejected (would invalidate divisions)."""
    renamer = {0: 1}
    pytest.raises(ValueError, lambda: d.rename(index=renamer))
def test_to_timestamp():
    """to_timestamp converts a PeriodIndex like pandas, for frames and
    series, with default and explicit freq/how."""
    index = pd.PeriodIndex(freq='A', start='1/1/2001', end='12/1/2004')
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]}, index=index)
    ddf = dd.from_pandas(df, npartitions=3)
    assert_eq(ddf.to_timestamp(), df.to_timestamp())
    assert_eq(ddf.to_timestamp(freq='M', how='s').compute(),
              df.to_timestamp(freq='M', how='s'))
    assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
    assert_eq(ddf.x.to_timestamp(freq='M', how='s').compute(),
              df.x.to_timestamp(freq='M', how='s'))
def test_to_frame():
    """Series.to_frame matches pandas, with and without a column name."""
    series = pd.Series([1, 2, 3], name='foo')
    dseries = dd.from_pandas(series, npartitions=2)
    assert_eq(series.to_frame(), dseries.to_frame())
    assert_eq(series.to_frame('bar'), dseries.to_frame('bar'))
def test_apply():
    """apply on series and frames matches pandas, with both explicit output
    metadata and metadata inference; axis=0 is unsupported."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    func = lambda row: row['x'] + row['y']

    assert_eq(ddf.x.apply(lambda x: x + 1),
              df.x.apply(lambda x: x + 1))

    # specify columns
    assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
              df.apply(lambda xy: xy[0] + xy[1], axis=1))
    assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', columns=None),
              df.apply(lambda xy: xy[0] + xy[1], axis='columns'))

    # inference
    assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
              df.apply(lambda xy: xy[0] + xy[1], axis=1))
    assert_eq(ddf.apply(lambda xy: xy, axis=1),
              df.apply(lambda xy: xy, axis=1))

    # result will be dataframe
    func = lambda x: pd.Series([x, x])
    assert_eq(ddf.x.apply(func, name=[0, 1]), df.x.apply(func))
    # inference
    assert_eq(ddf.x.apply(func), df.x.apply(func))

    # axis=0 would require whole-column access, which dask cannot provide
    with tm.assertRaises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis=0)

    with tm.assertRaises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis='index')
def test_applymap():
    """applymap applies an elementwise function like pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    # one scalar-valued and one tuple-valued elementwise function
    for elementwise in (lambda v: v + 1, lambda v: (v, v)):
        assert_eq(ddf.applymap(elementwise), pdf.applymap(elementwise))
def test_abs():
    """abs works on numeric data and raises TypeError on object columns."""
    pdf = pd.DataFrame({'A': [1, -2, 3, -4, 5],
                        'B': [-6., -7, -8, -9, 10],
                        'C': ['a', 'b', 'c', 'd', 'e']})
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.A.abs(), pdf.A.abs())
    assert_eq(ddf[['A', 'B']].abs(), pdf[['A', 'B']].abs())
    # strings have no absolute value
    pytest.raises(TypeError, lambda: ddf.C.abs())
    pytest.raises(TypeError, lambda: ddf.abs())
def test_round():
    """round matches pandas with default and explicit precision."""
    pdf = pd.DataFrame({'col1': [1.123, 2.123, 3.123],
                        'col2': [1.234, 2.234, 3.234]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    assert_eq(ddf.round(), pdf.round())
    assert_eq(ddf.round(2), pdf.round(2))
def test_cov():
    """cov on frames and series matches pandas with split_every and
    min_periods; tokenized names are deterministic per argument set."""
    # DataFrame
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=6)

    res = ddf.cov()
    res2 = ddf.cov(split_every=2)
    res3 = ddf.cov(10)          # positional min_periods
    res4 = ddf.cov(10, split_every=2)
    sol = df.cov()
    sol2 = df.cov(10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    # identical calls share a name; differing options must not
    assert res._name == ddf.cov()._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    # Series (partition counts intentionally differ between the operands)
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=6)
    db = dd.from_pandas(b, npartitions=7)

    res = da.cov(db)
    res2 = da.cov(db, split_every=2)
    res3 = da.cov(db, 10)
    res4 = da.cov(db, 10, split_every=2)
    sol = a.cov(b)
    sol2 = a.cov(b, 10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    assert res._name == da.cov(db)._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name
def test_corr():
    """corr on frames and series matches pandas with split_every and
    min_periods; names are deterministic; spearman is unsupported."""
    # DataFrame
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=6)

    res = ddf.corr()
    res2 = ddf.corr(split_every=2)
    res3 = ddf.corr(min_periods=10)
    res4 = ddf.corr(min_periods=10, split_every=2)
    sol = df.corr()
    sol2 = df.corr(min_periods=10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    # identical calls share a name; differing options must not
    assert res._name == ddf.corr()._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))

    # Series (partition counts intentionally differ between the operands)
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=6)
    db = dd.from_pandas(b, npartitions=7)

    res = da.corr(db)
    res2 = da.corr(db, split_every=2)
    res3 = da.corr(db, min_periods=10)
    res4 = da.corr(db, min_periods=10, split_every=2)
    # BUG FIX: the expected values must come from pandas, not from the dask
    # objects under test — the original used `da.corr(db)` here, comparing
    # dask against itself (cf. the parallel test_cov, which uses a.cov(b)).
    sol = a.corr(b)
    sol2 = a.corr(b, min_periods=10)
    assert_eq(res, sol)
    assert_eq(res2, sol)
    assert_eq(res3, sol2)
    assert_eq(res4, sol2)
    assert res._name == da.corr(db)._name
    assert res._name != res2._name
    assert res3._name != res4._name
    assert res._name != res3._name

    pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
    # corr between a Series and a DataFrame is a type error
    pytest.raises(TypeError, lambda: da.corr(ddf))
def test_cov_corr_meta():
    """cov/corr results and their inferred _meta dtypes match pandas,
    including mixed-precision inputs upcast to float64."""
    df = pd.DataFrame({'a': np.array([1, 2, 3]),
                       'b': np.array([1.0, 2.0, 3.0], dtype='f4'),
                       'c': np.array([1.0, 2.0, 3.0])},
                      index=pd.Index([1, 2, 3], name='myindex'))
    ddf = dd.from_pandas(df, npartitions=2)
    assert_eq(ddf.corr(), df.corr())
    assert_eq(ddf.cov(), df.cov())
    # int/f4 operands still produce float64 scalars
    assert ddf.a.cov(ddf.b)._meta.dtype == 'f8'
    assert ddf.a.corr(ddf.b)._meta.dtype == 'f8'
@pytest.mark.slow
def test_cov_corr_stable():
    """cov/corr stay numerically accurate on large data drawn from [-1, 1]."""
    df = pd.DataFrame(np.random.random((20000000, 2)) * 2 - 1, columns=['a', 'b'])
    ddf = dd.from_pandas(df, npartitions=50)
    assert_eq(ddf.cov(split_every=8), df.cov())
    assert_eq(ddf.corr(split_every=8), df.corr())
def test_autocorr():
    """Series.autocorr matches pandas for positive, zero and negative lags;
    the lag must be an integer."""
    series = pd.Series(np.random.random(100))
    dseries = dd.from_pandas(series, npartitions=10)

    for lag in (2, 0, -2):
        assert_eq(dseries.autocorr(lag), series.autocorr(lag))
    # split_every must not change the result
    assert_eq(dseries.autocorr(2, split_every=3), series.autocorr(2))
    pytest.raises(TypeError, lambda: dseries.autocorr(1.5))
def test_apply_infer_columns():
    """apply infers output structure (frame vs series, column labels, name)
    from a metadata pass when none is given."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    def return_df(x):
        # will create new DataFrame which columns is ['sum', 'mean']
        return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])

    # DataFrame to completely different DataFrame
    result = ddf.apply(return_df, axis=1)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
    assert_eq(result, df.apply(return_df, axis=1))

    # DataFrame to Series
    result = ddf.apply(lambda x: 1, axis=1)
    assert isinstance(result, dd.Series)
    assert result.name is None
    assert_eq(result, df.apply(lambda x: 1, axis=1))

    def return_df2(x):
        return pd.Series([x * 2, x * 3], index=['x2', 'x3'])

    # Series to completely different DataFrame
    result = ddf.x.apply(return_df2)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
    assert_eq(result, df.x.apply(return_df2))

    # Series to Series
    result = ddf.x.apply(lambda x: 1)
    assert isinstance(result, dd.Series)
    # the input series name carries through
    assert result.name == 'x'
    assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
    """Datetime attributes are available directly on a dask Index."""
    i = tm.makeTimeSeries()
    a = dd.from_pandas(i, npartitions=3)

    assert 'day' in dir(a.index)
    # returns a numpy array in pandas, but a Index in dask
    assert_eq(a.index.day, pd.Index(i.index.day))
    assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
    """nlargest/nsmallest match pandas for frames (single and multiple
    columns) and series, with and without split_every."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(20),
                       'b': list(ascii_lowercase[:20]),
                       'c': np.random.permutation(20).astype('float64')})
    ddf = dd.from_pandas(df, npartitions=3)

    for m in ['nlargest', 'nsmallest']:
        # dispatch to the method named by m on either frame type
        f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)

        res = f(ddf, 5, 'a')
        res2 = f(ddf, 5, 'a', split_every=2)
        sol = f(df, 5, 'a')
        assert_eq(res, sol)
        assert_eq(res2, sol)
        # split_every participates in tokenization
        assert res._name != res2._name

        res = f(ddf, 5, ['a', 'b'])
        res2 = f(ddf, 5, ['a', 'b'], split_every=2)
        sol = f(df, 5, ['a', 'b'])
        assert_eq(res, sol)
        assert_eq(res2, sol)
        assert res._name != res2._name

        res = f(ddf.a, 5)
        res2 = f(ddf.a, 5, split_every=2)
        sol = f(df.a, 5)
        assert_eq(res, sol)
        assert_eq(res2, sol)
        assert res._name != res2._name
def test_reset_index():
    """reset_index matches pandas (modulo the regenerated index values) and
    clears known divisions, for frames and series, with and without drop."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)

    sol = df.reset_index()
    res = ddf.reset_index()
    # the new default index cannot be tracked across partitions
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    sol = df.reset_index(drop=True)
    res = ddf.reset_index(drop=True)
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    sol = df.x.reset_index()
    res = ddf.x.reset_index()
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)

    sol = df.x.reset_index(drop=True)
    res = ddf.x.reset_index(drop=True)
    assert all(d is None for d in res.divisions)
    assert_eq(res, sol, check_index=False)
def test_dataframe_compute_forward_kwargs():
    """Unknown keywords given to compute are forwarded without raising."""
    x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
    x.compute(bogus_keyword=10)
def test_series_iteritems():
    """Series.iteritems yields the same (index, value) pairs as pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf['x'].iteritems(), ddf['x'].iteritems()):
        assert expected == actual
def test_dataframe_iterrows():
    """DataFrame.iterrows yields rows equal to the pandas equivalents."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf.iterrows(), ddf.iterrows()):
        # compare the row Series; [0] would be the shared index label
        tm.assert_series_equal(expected[1], actual[1])
def test_dataframe_itertuples():
    """DataFrame.itertuples yields the same namedtuples as pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf.itertuples(), ddf.itertuples()):
        assert expected == actual
def test_astype():
    """astype casts whole frames and single columns like pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
                       index=[10, 20, 30, 40])
    ddf = dd.from_pandas(pdf, 2)

    assert_eq(ddf.astype(float), pdf.astype(float))
    assert_eq(ddf.x.astype(float), pdf.x.astype(float))
def test_astype_categoricals():
    """Casting to 'category' yields category dtype with unknown categories
    (categories cannot be known without a full pass over the data)."""
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
                       'y': [1, 2, 3, 4, 5]})
    ddf = dd.from_pandas(df, 2)

    ddf2 = ddf.astype({'x': 'category'})
    assert not ddf2.x.cat.known
    assert ddf2.x.dtype == 'category'
    assert ddf2.compute().x.dtype == 'category'

    dx = ddf.x.astype('category')
    assert not dx.cat.known
    assert dx.dtype == 'category'
    assert dx.compute().dtype == 'category'
def test_groupby_callable():
    """groupby accepts a callable applied to the index, like pandas."""
    a = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
                     index=[1, 2, 3, 4])
    b = dd.from_pandas(a, 2)

    def iseven(x):
        return x % 2 == 0

    assert_eq(a.groupby(iseven).y.sum(),
              b.groupby(iseven).y.sum())
    assert_eq(a.y.groupby(iseven).sum(),
              b.y.groupby(iseven).sum())
def test_set_index_sorted_true():
    """set_index(sorted=True) skips the shuffle (old graph is reused) and
    yields known divisions; lying about sortedness raises."""
    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': [10, 20, 30, 40],
                       'z': [4, 3, 2, 1]})
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions

    b = a.set_index('x', sorted=True)
    assert b.known_divisions
    # no shuffle: the original tasks are reused verbatim
    assert set(a.dask).issubset(set(b.dask))

    for drop in [True, False]:
        assert_eq(a.set_index('x', drop=drop),
                  df.set_index('x', drop=drop))
        assert_eq(a.set_index(a.x, sorted=True, drop=drop),
                  df.set_index(df.x, drop=drop))
        assert_eq(a.set_index(a.x + 1, sorted=True, drop=drop),
                  df.set_index(df.x + 1, drop=drop))

    # z is descending, so claiming it is sorted must fail
    with pytest.raises(ValueError):
        a.set_index(a.z, sorted=True)
def test_compute_divisions():
    """compute_divisions recovers valid divisions from a sorted index on a
    frame that was built without them."""
    from dask.dataframe.shuffle import compute_divisions
    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': [10, 20, 30, 40],
                       'z': [4, 3, 2, 1]},
                      index=[1, 3, 10, 20])
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions

    divisions = compute_divisions(a)
    # attach the computed divisions to a shallow copy and verify the data
    # is unchanged while the divisions become known
    b = copy(a)
    b.divisions = divisions
    assert_eq(a, b, check_divisions=False)
    assert b.known_divisions
def test_methods_tokenize_differently():
    """Different functions given to map_partitions must get different keys."""
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=1)
    name_of_min = ddf.x.map_partitions(lambda x: pd.Series(x.min()))._name
    name_of_max = ddf.x.map_partitions(lambda x: pd.Series(x.max()))._name
    assert name_of_min != name_of_max
def test_sorted_index_single_partition():
    """set_index(sorted=True) works when there is only one partition."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
    ddf = dd.from_pandas(pdf, npartitions=1)
    assert_eq(ddf.set_index('x', sorted=True), pdf.set_index('x'))
def _assert_info(df, ddf, memory_usage=True):
    """Assert that ``ddf.info()`` renders the same text as ``df.info()``.

    Parameters
    ----------
    df : pd.DataFrame
        Reference pandas frame.
    ddf : dd.DataFrame
        Dask frame expected to produce matching info output.
    memory_usage : bool
        Forwarded to both ``info`` calls.
    """
    from io import StringIO
    assert isinstance(df, pd.DataFrame)
    assert isinstance(ddf, dd.DataFrame)
    buf_pd, buf_da = StringIO(), StringIO()

    df.info(buf=buf_pd, memory_usage=memory_usage)
    ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
    stdout_pd = buf_pd.getvalue()
    stdout_da = buf_da.getvalue()
    # only the header's class name should differ between the two outputs
    stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
    assert stdout_pd == stdout_da
def test_info():
    """DataFrame.info output matches pandas; verbose=False prints a short
    summary and buf=None prints rather than returning."""
    from io import StringIO
    from dask.compatibility import unicode
    from pandas.formats import format
    # patch pandas' line writer so output capture behaves consistently
    format._put_lines = put_lines

    test_frames = [
        pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=pd.Int64Index(range(4))),  # No RangeIndex in dask
        pd.DataFrame()
    ]

    for df in test_frames:
        ddf = dd.from_pandas(df, npartitions=4)
        _assert_info(df, ddf)

    buf = StringIO()
    ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=range(4)), npartitions=4)

    # Verbose=False
    ddf.info(buf=buf, verbose=False)
    assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
                                     "Columns: 2 entries, x to y\n"
                                     "dtypes: int64(2)")

    # buf=None
    assert ddf.info(buf=None) is None
def test_groupby_multilevel_info():
    """info works on grouped results, including MultiIndex columns (GH-1844)."""
    # GH 1844
    from io import StringIO
    from dask.compatibility import unicode
    from pandas.formats import format
    # patch pandas' line writer so output capture behaves consistently
    format._put_lines = put_lines

    df = pd.DataFrame({'A': [1, 1, 2, 2],
                       'B': [1, 2, 3, 4],
                       'C': [1, 2, 3, 4]})
    ddf = dd.from_pandas(df, npartitions=2)

    g = ddf.groupby(['A', 'B']).sum()
    # slight difference between memory repr (single additional space)
    _assert_info(g.compute(), g, memory_usage=False)

    buf = StringIO()
    g.info(buf, verbose=False)
    assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 1 entries, C to C
dtypes: int64(1)""")

    # multilevel
    g = ddf.groupby(['A', 'B']).agg(['count', 'sum'])
    _assert_info(g.compute(), g, memory_usage=False)

    buf = StringIO()
    g.info(buf, verbose=False)
    assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 2 entries, (C, count) to (C, sum)
dtypes: int64(2)""")
def test_categorize_info():
    """info works after categorize (pandas GH-14368 workaround check)."""
    # assert that we can call info after categorize
    # workaround for: https://github.com/pydata/pandas/issues/14368
    from io import StringIO
    from dask.compatibility import unicode
    from pandas.formats import format
    # patch pandas' line writer so output capture behaves consistently
    format._put_lines = put_lines

    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': pd.Series(list('aabc')),
                       'z': pd.Series(list('aabc'))},
                      index=pd.Int64Index(range(4)))  # No RangeIndex in dask
    ddf = dd.from_pandas(df, npartitions=4).categorize(['y'])

    # Verbose=False
    buf = StringIO()
    ddf.info(buf=buf, verbose=True)
    assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
                                     "Int64Index: 4 entries, 0 to 3\n"
                                     "Data columns (total 3 columns):\n"
                                     "x    4 non-null int64\n"
                                     "y    4 non-null category\n"
                                     "z    4 non-null object\n"
                                     "dtypes: category(1), object(1), int64(1)")
def test_gh_1301():
    """Assigning an astype()'d column keeps the correct dtype (GH 1301)."""
    frame = pd.DataFrame([['1', '2'], ['3', '4']])
    dframe = dd.from_pandas(frame, npartitions=2)

    expected = frame.assign(y=frame[1].astype(int))
    result = dframe.assign(y=dframe[1].astype(int))

    assert_eq(result, expected)
    assert result.dtypes['y'] == np.dtype(int)
def test_timeseries_sorted():
    """set_index(sorted=True) on a monotonic column recovers the frame."""
    pdf = tm.makeTimeDataFrame()
    ddf = dd.from_pandas(pdf.reset_index(), npartitions=2)
    pdf.index.name = 'index'
    result = ddf.set_index('index', sorted=True, drop=True)
    assert_eq(result, pdf)
def test_column_assignment():
    """Adding a column via __setitem__ matches pandas; prior copies are
    unaffected."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    before = ddf.copy()

    ddf['z'] = ddf.x + ddf.y
    pdf['z'] = pdf.x + pdf.y

    assert_eq(pdf, ddf)
    # the copy taken before the assignment must not gain the new column
    assert 'z' not in before.columns
def test_columns_assignment():
    """Assigning several columns at once via a list selector matches pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    pdf2 = pdf.assign(y=pdf.x + 1, z=pdf.x - 1)
    pdf[['a', 'b']] = pdf2[['y', 'z']]

    ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
    ddf[['a', 'b']] = ddf2[['y', 'z']]

    assert_eq(pdf, ddf)
def test_attribute_assignment():
    """Setting an existing column through attribute access rewrites it."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5],
                        'y': [1., 2., 3., 4., 5.]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    ddf.y = ddf.x + ddf.y
    assert_eq(ddf, pdf.assign(y=pdf.x + pdf.y))
def test_inplace_operators():
    """In-place operators (e.g. **=) rewrite the column like pandas does."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5],
                        'y': [1., 2., 3., 4., 5.]})
    ddf = dd.from_pandas(pdf, npartitions=2)

    ddf.y **= 0.5
    assert_eq(ddf.y, pdf.y ** 0.5)
    assert_eq(ddf, pdf.assign(y=pdf.y ** 0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("idx", [
    np.arange(100),
    sorted(np.random.random(size=100)),
    pd.date_range('20150101', periods=100)
])
def test_idxmaxmin(idx, skipna):
    """idxmax/idxmin match pandas on frames and series, on both axes,
    with NaNs present, and with tree reductions (split_every)."""
    df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'), index=idx)
    # inject NaNs so skipna=True and skipna=False give different answers
    df.b.iloc[31] = np.nan
    df.d.iloc[78] = np.nan
    ddf = dd.from_pandas(df, npartitions=3)

    # row-wise (axis=1) reductions
    assert_eq(df.idxmax(axis=1, skipna=skipna),
              ddf.idxmax(axis=1, skipna=skipna))
    assert_eq(df.idxmin(axis=1, skipna=skipna),
              ddf.idxmin(axis=1, skipna=skipna))

    # column-wise, with and without split_every; the two variants must
    # build distinct graphs (different _name)
    assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
    assert_eq(df.idxmax(skipna=skipna),
              ddf.idxmax(skipna=skipna, split_every=2))
    assert (ddf.idxmax(skipna=skipna)._name !=
            ddf.idxmax(skipna=skipna, split_every=2)._name)
    assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
    assert_eq(df.idxmin(skipna=skipna),
              ddf.idxmin(skipna=skipna, split_every=2))
    assert (ddf.idxmin(skipna=skipna)._name !=
            ddf.idxmin(skipna=skipna, split_every=2)._name)

    # Series reductions
    assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
    assert_eq(df.a.idxmax(skipna=skipna),
              ddf.a.idxmax(skipna=skipna, split_every=2))
    assert (ddf.a.idxmax(skipna=skipna)._name !=
            ddf.a.idxmax(skipna=skipna, split_every=2)._name)
    assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
    assert_eq(df.a.idxmin(skipna=skipna),
              ddf.a.idxmin(skipna=skipna, split_every=2))
    assert (ddf.a.idxmin(skipna=skipna)._name !=
            ddf.a.idxmin(skipna=skipna, split_every=2)._name)
def test_getitem_meta():
    """Selecting a series with a boolean mask matches pandas."""
    data = {'col1': ['a', 'a', 'b'],
            'col2': [0, 1, 0]}
    frame = pd.DataFrame(data=data, columns=['col1', 'col2'])
    dframe = dd.from_pandas(frame, npartitions=1)

    mask_pd = frame.col1 == 'a'
    mask_dd = dframe.col1 == 'a'
    assert_eq(frame.col2[mask_pd], dframe.col2[mask_dd])
def test_getitem_multilevel():
    """Tuple keys select columns from a MultiIndex-columned frame."""
    pdf = pd.DataFrame({('A', '0'): [1, 2, 2], ('B', '1'): [1, 2, 3]})
    ddf = dd.from_pandas(pdf, npartitions=3)

    # a single tuple key yields a series
    assert_eq(pdf['A', '0'], ddf['A', '0'])
    # a list of tuple keys yields a frame
    cols = [('A', '0'), ('B', '1')]
    assert_eq(pdf[cols], ddf[cols])
def test_set_index_sorted_min_max_same():
    """set_index(sorted=True) copes with a partition whose min == max."""
    first = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 0, 0]})
    second = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 1, 1]})

    ddf = dd.from_delayed([delayed(first), delayed(second)], meta=first)
    assert not ddf.known_divisions

    indexed = ddf.set_index('y', sorted=True)
    # the second partition is constant y == 1, so divisions repeat it
    assert indexed.divisions == (0, 1, 1)
def test_diff():
    """DataFrame/Series .diff matches pandas; periods must be an integer."""
    df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    ddf = dd.from_pandas(df, 5)

    # DataFrame: default, zero, positive, negative periods and axis=1
    assert_eq(ddf.diff(), df.diff())
    assert_eq(ddf.diff(0), df.diff(0))
    assert_eq(ddf.diff(2), df.diff(2))
    assert_eq(ddf.diff(-2), df.diff(-2))
    assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))

    # Series
    assert_eq(ddf.a.diff(), df.a.diff())
    assert_eq(ddf.a.diff(0), df.a.diff(0))
    assert_eq(ddf.a.diff(2), df.a.diff(2))
    assert_eq(ddf.a.diff(-2), df.a.diff(-2))

    # graph keys are deterministic in the periods argument
    assert ddf.diff(2)._name == ddf.diff(2)._name
    assert ddf.diff(2)._name != ddf.diff(3)._name
    # non-integer periods are rejected
    pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
    """shift agrees with pandas for frames and series; non-integer
    periods raise TypeError."""
    pdf = tm.makeTimeDataFrame()
    ddf = dd.from_pandas(pdf, npartitions=4)

    # DataFrame shifts
    assert_eq(ddf.shift(), pdf.shift())
    for periods in (0, 2, -2):
        assert_eq(ddf.shift(periods), pdf.shift(periods))
    assert_eq(ddf.shift(2, axis=1), pdf.shift(2, axis=1))

    # Series shifts
    assert_eq(ddf.A.shift(), pdf.A.shift())
    for periods in (0, 2, -2):
        assert_eq(ddf.A.shift(periods), pdf.A.shift(periods))

    # fractional periods are rejected
    with pytest.raises(TypeError):
        ddf.shift(1.5)
def test_shift_with_freq():
    """shift(freq=...) matches pandas and preserves known_divisions as
    expected for Datetime-, Period- and TimedeltaIndex-backed frames."""
    df = tm.makeTimeDataFrame(30)

    # DatetimeIndex: divisions stay known whenever both the data frequency
    # and the shift frequency tick at fixed intervals
    for data_freq, divs1 in [('B', False), ('D', True), ('H', True)]:
        df = df.set_index(tm.makeDateIndex(30, freq=data_freq))
        ddf = dd.from_pandas(df, npartitions=4)
        for freq, divs2 in [('S', True), ('W', False),
                            (pd.Timedelta(10, unit='h'), True)]:
            for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
                res = d.shift(2, freq=freq)
                assert_eq(res, p.shift(2, freq=freq))
                assert res.known_divisions == divs2
        # Index shifts also work with freq=None
        res = ddf.index.shift(2)
        assert_eq(res, df.index.shift(2))
        assert res.known_divisions == divs1

    # PeriodIndex: shifting is only meaningful by the data's own frequency
    for data_freq, divs in [('B', False), ('D', True), ('H', True)]:
        df = df.set_index(pd.period_range('2000-01-01', periods=30,
                                          freq=data_freq))
        ddf = dd.from_pandas(df, npartitions=4)
        for d, p in [(ddf, df), (ddf.A, df.A)]:
            res = d.shift(2, freq=data_freq)
            assert_eq(res, p.shift(2, freq=data_freq))
            assert res.known_divisions == divs
        # PeriodIndex.shift doesn't have `freq` parameter
        res = ddf.index.shift(2)
        assert_eq(res, df.index.shift(2))
        assert res.known_divisions == divs
        with pytest.raises(ValueError):
            ddf.index.shift(2, freq='D')  # freq keyword not supported

    # TimedeltaIndex: divisions always stay known
    for data_freq in ['T', 'D', 'H']:
        df = df.set_index(tm.makeTimedeltaIndex(30, freq=data_freq))
        ddf = dd.from_pandas(df, npartitions=4)
        for freq in ['S', pd.Timedelta(10, unit='h')]:
            for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
                res = d.shift(2, freq=freq)
                assert_eq(res, p.shift(2, freq=freq))
                assert res.known_divisions
        # Index shifts also work with freq=None
        res = ddf.index.shift(2)
        assert_eq(res, df.index.shift(2))
        assert res.known_divisions

    # Other index types error
    df = tm.makeDataFrame()
    ddf = dd.from_pandas(df, npartitions=4)
    pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq='S'))
    pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq='S'))
    pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize('method', ['first', 'last'])
def test_first_and_last(method):
    """DataFrame/Series .first()/.last() match pandas across offsets."""
    call = lambda obj, offset: getattr(obj, method)(offset)
    offsets = ['0d', '100h', '20d', '20B', '3W', '3M', '400d', '13M']

    for freq in ['12h', 'D']:
        index = pd.date_range('1/1/2000', '1/1/2001', freq=freq)[::4]
        pdf = pd.DataFrame(np.random.random((len(index), 4)), index=index,
                           columns=['A', 'B', 'C', 'D'])
        ddf = dd.from_pandas(pdf, npartitions=10)
        for offset in offsets:
            assert_eq(call(ddf, offset), call(pdf, offset))
            assert_eq(call(ddf.A, offset), call(pdf.A, offset))
@pytest.mark.parametrize('npartitions', [1, 4, 20])
@pytest.mark.parametrize('split_every', [2, 5])
@pytest.mark.parametrize('split_out', [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
    """Series.unique with hash-based split_out/split_every tree reduction."""
    from string import ascii_lowercase
    s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
    ds = dd.from_pandas(s, npartitions=npartitions)
    dropped = ds.unique(split_every=split_every, split_out=split_out)

    # inspect the optimized graph: each input partition must be a root task
    dsk = dropped._optimize(dropped.dask, dropped._keys())
    from dask.core import get_deps
    dependencies, dependents = get_deps(dsk)
    assert len([k for k, v in dependencies.items() if not v]) == npartitions

    assert dropped.npartitions == (split_out or 1)
    # synchronous scheduler keeps the comparison deterministic
    assert sorted(dropped.compute(get=dask.get)) == sorted(s.unique())
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_drop_duplicates(split_every):
    """drop_duplicates with split_out matches pandas over subset/keep combos."""
    # build three shuffled columns with many duplicate values
    x = np.concatenate([np.arange(10)] * 100)[:, None]
    y = x.copy()
    z = np.concatenate([np.arange(20)] * 50)[:, None]
    rs = np.random.RandomState(1)  # fixed seed for reproducibility
    rs.shuffle(x)
    rs.shuffle(y)
    rs.shuffle(z)
    df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=['x', 'y', 'z'])
    ddf = dd.from_pandas(df, npartitions=20)

    for subset, keep in product([None, ['x', 'z']], ['first', 'last']):
        sol = df.drop_duplicates(subset=subset, keep=keep)
        res = ddf.drop_duplicates(subset=subset, keep=keep,
                                  split_every=split_every, split_out=10)
        # result is spread over split_out partitions but equals pandas
        assert res.npartitions == 10
        assert_eq(sol, res)
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_value_counts(split_every):
    """value_counts honours split_out and still matches pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3] * 100})
    ddf = dd.from_pandas(pdf, npartitions=5)

    counts = ddf.x.value_counts(split_out=10, split_every=split_every)
    assert counts.npartitions == 10
    assert_eq(counts, pdf.x.value_counts())
def test_values():
    """.values of frames, series and index equal the pandas arrays."""
    from dask.array.utils import assert_eq
    pdf = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                        'y': [2, 3, 4, 5]},
                       index=pd.Index([1., 2., 3., 4.], name='ind'))
    ddf = dd.from_pandas(pdf, 2)

    pairs = [(pdf.values, ddf.values),
             (pdf.x.values, ddf.x.values),
             (pdf.y.values, ddf.y.values),
             (pdf.index.values, ddf.index.values)]
    for expected, actual in pairs:
        assert_eq(expected, actual)
def test_copy():
    """copy() yields an independent collection; mutating the original does
    not leak into the copy."""
    pdf = pd.DataFrame({'x': [1, 2, 3]})
    original = dd.from_pandas(pdf, npartitions=2)
    duplicate = original.copy()

    original['y'] = original.x * 2
    # the copy still matches the un-mutated pandas frame
    assert_eq(duplicate, pdf)
    pdf['y'] = pdf.x * 2
def test_del():
    """del ddf[col] removes the column without touching earlier copies."""
    pdf = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                        'y': [2, 3, 4, 5]},
                       index=pd.Index([1., 2., 3., 4.], name='ind'))
    original = dd.from_pandas(pdf, 2)
    snapshot = original.copy()

    del original['x']
    # the copy made before deletion still has both columns
    assert_eq(snapshot, pdf)

    del pdf['x']
    assert_eq(original, pdf)
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('deep', [True, False])
def test_memory_usage(index, deep):
    """memory_usage matches pandas for frames and series."""
    pdf = pd.DataFrame({'x': [1, 2, 3],
                        'y': [1.0, 2.0, 3.0],
                        'z': ['a', 'b', 'c']})
    ddf = dd.from_pandas(pdf, npartitions=2)

    kwargs = dict(index=index, deep=deep)
    assert_eq(pdf.memory_usage(**kwargs), ddf.memory_usage(**kwargs))
    assert (pdf.x.memory_usage(**kwargs) ==
            ddf.x.memory_usage(**kwargs).compute())
@pytest.mark.parametrize('reduction', ['sum', 'mean', 'std', 'var', 'count',
                                       'min', 'max', 'idxmin', 'idxmax',
                                       'prod', 'all', 'sem'])
def test_dataframe_reductions_arithmetic(reduction):
    """A reduction result can participate in elementwise arithmetic."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 5],
                        'y': [1.1, 2.2, 3.3, 4.4, 5.5]})
    ddf = dd.from_pandas(pdf, npartitions=3)

    expected = pdf - (getattr(pdf, reduction)() + 1)
    result = ddf - (getattr(ddf, reduction)() + 1)
    assert_eq(result, expected)
def test_datetime_loc_open_slicing():
    """Open-ended .loc slices on a DatetimeIndex match pandas."""
    rng = pd.date_range('01.01.2015', '05.05.2015')
    pdf = pd.DataFrame(np.random.random((len(rng), 2)), index=rng)
    ddf = dd.from_pandas(pdf, npartitions=5)

    # frame: open on either side
    assert_eq(pdf.loc[:'02.02.2015'], ddf.loc[:'02.02.2015'])
    assert_eq(pdf.loc['02.02.2015':], ddf.loc['02.02.2015':])
    # series: same slices
    assert_eq(pdf[0].loc[:'02.02.2015'], ddf[0].loc[:'02.02.2015'])
    assert_eq(pdf[0].loc['02.02.2015':], ddf[0].loc['02.02.2015':])
def test_to_datetime():
    """dd.to_datetime mirrors pd.to_datetime for frames and series."""
    pdf = pd.DataFrame({'year': [2015, 2016],
                        'month': [2, 3],
                        'day': [4, 5]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    assert_eq(pd.to_datetime(pdf), dd.to_datetime(ddf))

    dates = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
    ddates = dd.from_pandas(dates, npartitions=10)
    assert_eq(pd.to_datetime(dates, infer_datetime_format=True),
              dd.to_datetime(ddates, infer_datetime_format=True))
@pytest.mark.parametrize('drop', [0, 9])
def test_slice_on_filtered_boundary(drop):
    """Concat along axis=1 after filtering away a division-boundary row."""
    # https://github.com/dask/dask/issues/2211
    x = np.arange(10)
    x[[5, 6]] -= 2  # make the index non-monotonic around the boundary
    df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
    pdf = df.set_index("A").query("B != {}".format(drop))
    ddf = dd.from_pandas(df, 1).set_index("A").query("B != {}".format(drop))

    result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
    expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
    assert_eq(result, expected)
    assert not result.compute().index.is_monotonic  # didn't accidentally sort
def test_boundary_slice_nonmonotonic():
    """boundary_slice on a non-monotonic index keeps only in-range rows."""
    x = np.array([-1, -2, 2, 4, 3])
    df = pd.DataFrame({"B": range(len(x))}, index=x)

    cases = [
        ((0, 4), df.iloc[2:]),      # both negatives fall outside
        ((-1, 4), df.drop(-2)),     # only -2 is out of range
        ((-2, 3), df.drop(4)),      # only 4 is out of range
        ((-2, 3.5), df.drop(4)),    # float bound behaves the same
        ((-2, 4), df),              # full range keeps everything
    ]
    for (left, right), expected in cases:
        result = boundary_slice(df, left, right)
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index, left, right', [
    (range(10), 0, 9),
    (range(10), -1, None),
    (range(10), None, 10),
    ([-1, 0, 2, 1], None, None),
    ([-1, 0, 2, 1], -1, None),
    ([-1, 0, 2, 1], None, 2),
    ([-1, 0, 2, 1], -2, 3),
    (pd.date_range("2017", periods=10), None, None),
    (pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
    (pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
    (pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
    (pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
])
def test_boundary_slice_same(index, left, right):
    """boundary_slice is a no-op when the bounds cover the whole index
    (None means unbounded on that side)."""
    df = pd.DataFrame({"A": range(len(index))}, index=index)
    result = boundary_slice(df, left, right)
    tm.assert_frame_equal(result, df)
| bsd-3-clause |
anirudhnair/KernelBasedCharcterization | TAUTraceAnalysis/EBSTraceStat/EBSTraceStat.py | 1 | 35614 | '''
Created on Feb 11, 2014
@author: anirudhj
'''
import os,shutil
from sax import saxpy as SaX
import fileinput, shlex
from Settings import Common
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class EBSTraceStat(object):
'''
get stats from EBS trace
'''
def __init__(self, filePath):
self.m_filePath = ""
self.traceTable = None
self.m_Metrics = None
self.m_pid = '0'
self.m_sax = None
self.m_ObjDumpReader = None
if os.path.isfile(filePath + ".bak"):
os.remove(filePath + ".bak")
shutil.copyfile(filePath, filePath + ".bak")
self.m_filePath = filePath + ".bak"
self.m_pid = os.path.basename(self.m_filePath).split(".")[2]
self.m_sax = SaX.SAX(Common.SIGLEN, Common.SAX_ALPHABETS, 1e-6)
def RegisterObjDumpReader(self,reader):
self.m_ObjDumpReader = reader
def CleanTrace(self):
'''
remove lines with %
remove $ from line begining
note PAPI events and remove all lines before this line
split | as delimiter, extract 4th token and retian in file
'''
for line in fileinput.FileInput(self.m_filePath,inplace=1):
if "Metrics" in line:
self.ExtractMetrics(line)
continue
elif ( line[0] == '#'):
continue
if ('$' in line or '%' in line) :
line = line.replace('$','\n')
line = line.replace('%','\n%')
print line,
fileinput.close()
for line in fileinput.FileInput(self.m_filePath,inplace=1):
if( line[0] == "%"):
continue;
cleanedLine = line.strip()
if (cleanedLine):
metricLine = (line.split("|")[4]).strip()
instPointer = (line.split("|")[6]).strip()
metricVals = ','.join(['"{0}"'.format(fragment) if ' ' in fragment else fragment \
for fragment in shlex.split(metricLine)])
metricVals = metricVals + ',' + instPointer
print metricVals
#print ','.join(shlex.split(cleanedLine))
fileinput.close()
def ExtractMetrics(self, line):
listMetrics = line.split(' ')
del listMetrics[0]
del listMetrics[0]
self.m_Metrics = listMetrics
def GetMetricsList(self):
return self.m_Metrics
def LoadTrace(self):
'''
The first metric is always TIME. Second the EBS_SOURCE
'''
listHeader = []
listHeader.extend(self.m_Metrics)
listHeader.append(Common.IP)
self.traceTable = pd.read_table(self.m_filePath,sep=',',header=None,names= listHeader)
self.traceTable = self.traceTable.sort_index(by=self.m_Metrics[0], ascending=[True])
sizeList = len(self.m_Metrics)
for index_ in range(2,sizeList):
self.traceTable[self.m_Metrics[index_]] = self.traceTable[self.m_Metrics[index_]]. \
sub(self.traceTable[self.m_Metrics[index_]].shift(), fill_value = 0)
def AnalyzeIPFDBetweenTimeStamps(self,startTimes, endTimes):
iterIPFDMap = {}
iter_ = 0
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
endTime_ = endTimes[index_]
reducedTrace_ = self.traceTable[(self.traceTable['TIME'] >= startTime_) & (self.traceTable['TIME'] <= endTime_)]
listIPs = reducedTrace_[Common.IP].values
#print listIPs
listInsts = []
for ip in listIPs:
instType = self.m_ObjDumpReader.GetInstructionType(ip[2:])
if( instType == 0):
continue
listInsts.append(instType)
if( len(listInsts) == 0):
print "---------------------------------------------------"
print "WARNING: Empty instruction list"
print "---------------------------------------------------"
instCtr = Common.GetFrequencyList(listInsts)
iterIPFDMap[iter_] = instCtr
#ploting
x = []
y = []
for key in instCtr:
x.append(key)
x.sort()
for key in x:
y.append(instCtr[key])
plt.bar(np.arange(len(x)) ,y,align='center')
plt.xticks(np.arange(len(x)), x,rotation=30, size='small')
plt.savefig(os.path.dirname(self.m_filePath) + "/IPFD_" + str(iter_),format="pdf",dpi=500)
plt.clf()
iter_+=1
return iterIPFDMap
def AnalyzeBetweenTimeStamps(self,x_index,y_index,startTimes,endTimes):
startTime = startTimes[0]
endTime = endTimes[len(endTimes) - 1]
reducedTrace = self.traceTable[(self.traceTable['TIME'] >= startTime) & (self.traceTable['TIME'] <= endTime)]
y_vals = reducedTrace[self.m_Metrics[y_index]]
x_vals = reducedTrace[self.m_Metrics[x_index]].values
plt.plot(x_vals, y_vals , 'g-') #first value is useless as it is not sub'ed
# sax string rep for each iter
saxStr = ''
iterSAXMap = {}
iter_ = 0
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
endTime_ = endTimes[index_]
reducedTrace_ = self.traceTable[(self.traceTable['TIME'] >= startTime_) & (self.traceTable['TIME'] <= endTime_)]
y_vals_ = reducedTrace_[self.m_Metrics[y_index]]
saxStr_,indices = self.m_sax.to_letter_rep(y_vals_)
saxStr+=saxStr_
iterSAXMap[iter_] = saxStr_
iter_+=1
saxNum = Common.GetNumArrayFromString(saxStr)
#ploting
vlinePoints = endTimes
plt.vlines(vlinePoints, [y_vals.min()],[y_vals.max()],'r','dashed')
plt.xlabel(self.m_Metrics[x_index])
plt.ylabel(self.m_Metrics[y_index])
plt.xlim(x_vals.min(), x_vals.max())
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_"+ self.m_pid +".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
plt.plot(saxNum, 'g-')
plt.xlabel('SAX string Length')
plt.ylabel('SAX alphabets')
plt.title(self.m_Metrics[y_index])
xticks = range(0,Common.SIGLEN*len(startTimes),Common.SIGLEN)
plt.xticks(xticks)
plt.yticks(range(Common.SAX_ALPHABETS))
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_"+ self.m_pid +"_SAX.pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(18, 12)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
return iterSAXMap
def Analyze(self,x_index,y_index):
y_vals = self.traceTable[self.m_Metrics[y_index]].sub(self.traceTable[self.m_Metrics[y_index]].shift(), fill_value = 0).values
y_vals = y_vals[1:]
plt.plot(self.traceTable[self.m_Metrics[x_index]].values[1:], y_vals, 'g-')
plt.xlabel(self.m_Metrics[x_index])
plt.ylabel(self.m_Metrics[y_index])
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_" + self.m_pid + ".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
def SAXString(self,index,startTime,endTime):
reducedTrace = self.traceTable[(self.traceTable['TIME'] >= startTime) & (self.traceTable['TIME'] <= endTime)]
vals = reducedTrace[self.m_Metrics[index]].sub(reducedTrace[self.m_Metrics[index]].shift(), fill_value = 0).values
vals = vals[1:]
return self.m_sax.to_letter_rep(vals)
def IPCAnalyzeBetweenTimeStamps(self,startTimes,endTimes):
startTime = startTimes[0]
endTime = endTimes[len(endTimes) - 1]
tmptrace = self.traceTable
tmptrace[self.m_Metrics[Common.INS_INDEX]] = tmptrace[self.m_Metrics[Common.INS_INDEX]].sub(tmptrace[self.m_Metrics[Common.INS_INDEX]].shift(), fill_value = 0)
reducedTrace = tmptrace[(tmptrace['TIME'] >= startTime) & (tmptrace['TIME'] <= endTime)]
insDiff = reducedTrace[self.m_Metrics[Common.INS_INDEX]].values
cycDiff = reducedTrace[self.m_Metrics[Common.CYC_INDEX]].values
IPC = np.divide(insDiff.astype(float),cycDiff.astype(float))
x_vals = reducedTrace[self.m_Metrics[Common.TIME_INDEX]].values
plt.plot(x_vals, IPC , 'g-') #first value is useless as it is not sub'ed
# sax string rep for each iter
saxStr = ''
iterSAXMap = {}
iter_ = 0
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
endTime_ = endTime[index_]
reducedTrace_ = tmptrace[(tmptrace['TIME'] >= startTime_) & (tmptrace['TIME'] <= endTime_)]
insDiff_ = reducedTrace_[self.m_Metrics[Common.INS_INDEX]].values
cycDiff_ = reducedTrace_[self.m_Metrics[Common.CYC_INDEX]].values
IPC_ = np.divide(insDiff_.astype(float),cycDiff_.astype(float))
saxStr_,indices = self.m_sax.to_letter_rep(IPC_)
saxStr=+saxStr_
iterSAXMap[iter_] = saxStr_
iter+=1
saxNum = Common.GetNumArrayFromString(saxStr)
#ploting
vlinePoints = endTimes
plt.vlines(vlinePoints, [IPC.min()],[IPC.max()],'r','dashed')
plt.xlabel(self.m_Metrics[Common.TIME_INDEX])
plt.ylabel('IPC')
plt.xlim(x_vals.min(), x_vals.max())
fileDumpPath = (self.m_Metrics[Common.TIME_INDEX] + "_" + 'IPC' + "_"+ self.m_pid +".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
plt.plot(saxNum, 'g-')
plt.xlabel('SAX string Length')
plt.ylabel('SAX alphabets')
plt.title('IPC')
xticks = range(0,Common.SIGLEN*len(startTimes),Common.SIGLEN)
plt.xticks(xticks)
plt.yticks(range(Common.SAX_ALPHABETS))
fileDumpPath = (self.m_Metrics[Common.TIME_INDEX] + "_" + 'IPC' + "_"+ self.m_pid +"_SAX.pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(18, 12)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
return iterSAXMap
def GetPID(self):
return int(self.m_pid)
| apache-2.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/parcoords/line/_colorbar.py | 1 | 73457 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcoords.line"
_path_str = "parcoords.line.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
    """
    Sets the color of padded area.

    The 'bgcolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue')

    Returns
    -------
    str
    """
    return self["bgcolor"]

@bgcolor.setter
def bgcolor(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
    """
    Sets the axis line color.

    The 'bordercolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue')

    Returns
    -------
    str
    """
    return self["bordercolor"]

@bordercolor.setter
def bordercolor(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
    """
    Sets the width (in px) or the border enclosing this color bar.

    The 'borderwidth' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["borderwidth"]

@borderwidth.setter
def borderwidth(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
    """
    Sets the step in-between ticks on this axis. Use with `tick0`.

    Must be a positive number, or one of the special strings that
    "log" and "date" axes accept:
      - "log" axes: ticks are placed every 10^(n*dtick); "L<f>" gives
        ticks linearly spaced in value by `f`; "D1"/"D2" show digits
        between powers of 10 (all digits / only 2 and 5).
      - "date" axes: dtick is in milliseconds (e.g. 86400000.0 for one
        day); "M<n>" spaces ticks by `n` months (e.g. "M3", "M48").

    The 'dtick' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["dtick"]

@dtick.setter
def dtick(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
    """
    Determines a formatting rule for the tick exponents.

    For example, 1,000,000,000 renders as: "none" -> 1,000,000,000;
    "e" -> 1e+9; "E" -> 1E+9; "power" -> 1x10^9 (superscript 9);
    "SI" -> 1G; "B" -> 1B.

    The 'exponentformat' property is an enumeration, one of:
        ['none', 'e', 'E', 'power', 'SI', 'B']

    Returns
    -------
    Any
    """
    return self["exponentformat"]

@exponentformat.setter
def exponentformat(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["exponentformat"] = val
# len
# ---
@property
def len(self):
    """
    Sets the length of the color bar. This measure excludes the
    padding of both ends; the drawn bar is this length minus the
    padding on both ends.

    The 'len' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["len"]

@len.setter
def len(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
    """
    Determines whether this color bar's length (the measure in the
    color-variation direction) is set in units of plot "fraction" or
    in "pixels". Use `len` to set the value itself.

    The 'lenmode' property is an enumeration, one of:
        ['fraction', 'pixels']

    Returns
    -------
    Any
    """
    return self["lenmode"]

@lenmode.setter
def lenmode(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
    """
    Hide SI prefix for 10^n if |n| is below this number. This only
    has an effect when `tickformat` is "SI" or "B".

    The 'minexponent' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["minexponent"]

@minexponent.setter
def minexponent(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
    """
    Specifies the maximum number of ticks for the particular axis.
    The actual number of ticks is chosen automatically to be less
    than or equal to `nticks`. Has an effect only if `tickmode` is
    set to "auto".

    The 'nticks' property is a non-negative integer.

    Returns
    -------
    int
    """
    return self["nticks"]

@nticks.setter
def nticks(self, val):
    # validation/coercion is delegated to the base class __setitem__
    self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
    """
    Sets the axis line color.

    The 'outlinecolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'steelblue')

    Returns
    -------
    str
    """
    return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
    """
    Width (in px) of the axis line.

    The 'outlinewidth' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["outlinewidth"]

@outlinewidth.setter
def outlinewidth(self, value):
    self["outlinewidth"] = value
# separatethousands
# -----------------
@property
def separatethousands(self):
    """
    When "true", thousands separators are shown even for 4-digit
    integers.

    The 'separatethousands' property must be a bool.

    Returns
    -------
    bool
    """
    return self["separatethousands"]

@separatethousands.setter
def separatethousands(self, value):
    self["separatethousands"] = value
# showexponent
# ------------
@property
def showexponent(self):
    """
    Controls which tick exponents are displayed: "all" shows every
    exponent beside its significand, "first"/"last" show only the
    first/last tick's exponent, and "none" hides them all.

    The 'showexponent' property is an enumeration; one of:
        ['all', 'first', 'last', 'none']

    Returns
    -------
    Any
    """
    return self["showexponent"]

@showexponent.setter
def showexponent(self, value):
    self["showexponent"] = value
# showticklabels
# --------------
@property
def showticklabels(self):
    """
    Whether or not the tick labels are drawn.

    The 'showticklabels' property must be a bool.

    Returns
    -------
    bool
    """
    return self["showticklabels"]

@showticklabels.setter
def showticklabels(self, value):
    self["showticklabels"] = value
# showtickprefix
# --------------
@property
def showtickprefix(self):
    """
    Controls which tick labels carry the prefix: "all" prefixes
    every label, "first"/"last" only the first/last tick, and
    "none" hides the prefixes entirely.

    The 'showtickprefix' property is an enumeration; one of:
        ['all', 'first', 'last', 'none']

    Returns
    -------
    Any
    """
    return self["showtickprefix"]

@showtickprefix.setter
def showtickprefix(self, value):
    self["showtickprefix"] = value
# showticksuffix
# --------------
@property
def showticksuffix(self):
    """
    Same as `showtickprefix`, but applied to tick suffixes.

    The 'showticksuffix' property is an enumeration; one of:
        ['all', 'first', 'last', 'none']

    Returns
    -------
    Any
    """
    return self["showticksuffix"]

@showticksuffix.setter
def showticksuffix(self, value):
    self["showticksuffix"] = value
# thickness
# ---------
@property
def thickness(self):
    """
    Thickness of the color bar, excluding the size of the padding,
    ticks and labels.

    The 'thickness' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["thickness"]

@thickness.setter
def thickness(self, value):
    self["thickness"] = value
# thicknessmode
# -------------
@property
def thicknessmode(self):
    """
    Whether the color bar's thickness (the measure along the
    constant color direction) is given as a plot "fraction" or in
    "pixels". Use `thickness` to set the actual value.

    The 'thicknessmode' property is an enumeration; one of:
        ['fraction', 'pixels']

    Returns
    -------
    Any
    """
    return self["thicknessmode"]

@thicknessmode.setter
def thicknessmode(self, value):
    self["thicknessmode"] = value
# tick0
# -----
@property
def tick0(self):
    """
    Placement of the first tick on this axis; use together with
    `dtick`. For a "log" axis, supply the log of the starting tick
    (e.g. `tick0` = 2 to start at 100), except when
    `dtick` = *L<f>* (see `dtick`). For a "date" axis, supply a
    date string. For a "category" axis, supply a number on the
    scale where each category gets a serial number from zero in
    order of appearance.

    The 'tick0' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, value):
    self["tick0"] = value
# tickangle
# ---------
@property
def tickangle(self):
    """
    Angle of the tick labels relative to the horizontal; e.g. -90
    draws them vertically.

    The 'tickangle' property is an angle in degrees between -180
    and 180; values outside this range are wrapped to the
    equivalent angle (e.g. 270 becomes -90).

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, value):
    self["tickangle"] = value
# tickcolor
# ---------
@property
def tickcolor(self):
    """
    Color of the ticks.

    The 'tickcolor' property accepts:
      - a hex string (e.g. '#ff0000')
      - an rgb/rgba string (e.g. 'rgb(255,0,0)')
      - an hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - an hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - any named CSS color (e.g. 'steelblue', 'rebeccapurple')

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, value):
    self["tickcolor"] = value
# tickfont
# --------
@property
def tickfont(self):
    """
    Font used for the color bar's tick labels.

    The 'tickfont' property may be specified as:
      - an instance of
        :class:`plotly.graph_objs.parcoords.line.colorbar.Tickfont`
      - a dict of properties passed to the Tickfont constructor;
        supported keys are 'color', 'family' (CSS font-family
        preference list; Chart Studio Cloud servers only support a
        limited set such as "Arial", "Courier New", "Open Sans",
        "Times New Roman", ...) and 'size'

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, value):
    self["tickfont"] = value
# tickformat
# ----------
@property
def tickformat(self):
    """
    Tick label formatting rule, using d3 formatting mini-languages
    (similar to Python's). Numbers:
    https://github.com/d3/d3-3.x-api-reference/blob/master/Formatting.md#d3_format
    Dates: https://github.com/d3/d3-time-format#locale_format
    Two extras are added to d3's date formatter: "%h" for half of
    the year and "%{n}f" for fractional seconds with n digits,
    e.g. *2016-10-13 09:15:23.456* with "%H~%M~%S.%2f" renders as
    "09~15~23.46".

    The 'tickformat' property is a string (numbers are converted).

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, value):
    self["tickformat"] = value
# tickformatstops
# ---------------
@property
def tickformatstops(self):
    """
    Per-zoom-level tick format overrides.

    The 'tickformatstops' property is a tuple of Tickformatstop
    instances, specified as a list/tuple of
    plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
    objects or of dicts passed to the Tickformatstop constructor.
    Supported dict keys:
      - dtickrange: [*min*, *max*] dtick values describing a zoom
        level; either bound may be omitted by passing "null"
      - enabled: if `false` the stop is ignored even inside its
        `dtickrange`
      - name: template item name; named items are added to the
        output figure and can be overridden by a matching item
        (including `visible: false` / `enabled: false` to hide it)
      - templateitemname: refers to a named template item; without
        a template match the item is hidden unless `visible: true`
      - value: dtickformat for the described zoom level (same
        syntax as "tickformat")

    Returns
    -------
    tuple[plotly.graph_objs.parcoords.line.colorbar.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, value):
    self["tickformatstops"] = value
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
    """
    Template-level defaults for elements of
    parcoords.line.colorbar.tickformatstops (used as layout.templa
    te.data.parcoords.line.colorbar.tickformatstopdefaults).

    The 'tickformatstopdefaults' property may be specified as an
    instance of
    :class:`plotly.graph_objs.parcoords.line.colorbar.Tickformatstop`
    or as a dict passed to the Tickformatstop constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, value):
    self["tickformatstopdefaults"] = value
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
    """
    How tick labels that would overflow the graph div or the axis
    domain are handled. Defaults to *hide past domain* for inside
    tick labels, otherwise *hide past div*.

    The 'ticklabeloverflow' property is an enumeration; one of:
        ['allow', 'hide past div', 'hide past domain']

    Returns
    -------
    Any
    """
    return self["ticklabeloverflow"]

@ticklabeloverflow.setter
def ticklabeloverflow(self, value):
    self["ticklabeloverflow"] = value
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
    """
    Where tick labels are drawn.

    The 'ticklabelposition' property is an enumeration; one of:
        ['outside', 'inside', 'outside top', 'inside top',
         'outside bottom', 'inside bottom']

    Returns
    -------
    Any
    """
    return self["ticklabelposition"]

@ticklabelposition.setter
def ticklabelposition(self, value):
    self["ticklabelposition"] = value
# ticklen
# -------
@property
def ticklen(self):
    """
    Tick length (in px).

    The 'ticklen' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ticklen"]

@ticklen.setter
def ticklen(self, value):
    self["ticklen"] = value
# tickmode
# --------
@property
def tickmode(self):
    """
    Tick mode for this axis. "auto": the tick count comes from
    `nticks`. "linear": ticks start at `tick0` and step by `dtick`
    (the default when `tick0` and `dtick` are provided). "array":
    tick positions come from `tickvals` and labels from `ticktext`
    (the default when `tickvals` is provided).

    The 'tickmode' property is an enumeration; one of:
        ['auto', 'linear', 'array']

    Returns
    -------
    Any
    """
    return self["tickmode"]

@tickmode.setter
def tickmode(self, value):
    self["tickmode"] = value
# tickprefix
# ----------
@property
def tickprefix(self):
    """
    Prefix prepended to tick labels.

    The 'tickprefix' property is a string (numbers are converted).

    Returns
    -------
    str
    """
    return self["tickprefix"]

@tickprefix.setter
def tickprefix(self, value):
    self["tickprefix"] = value
# ticks
# -----
@property
def ticks(self):
    """
    Whether and where ticks are drawn: "" draws no ticks, while
    "outside" ("inside") draws them outside (inside) the axis
    lines.

    The 'ticks' property is an enumeration; one of:
        ['outside', 'inside', '']

    Returns
    -------
    Any
    """
    return self["ticks"]

@ticks.setter
def ticks(self, value):
    self["ticks"] = value
# ticksuffix
# ----------
@property
def ticksuffix(self):
    """
    Suffix appended to tick labels.

    The 'ticksuffix' property is a string (numbers are converted).

    Returns
    -------
    str
    """
    return self["ticksuffix"]

@ticksuffix.setter
def ticksuffix(self, value):
    self["ticksuffix"] = value
# ticktext
# --------
@property
def ticktext(self):
    """
    Text shown at the tick positions given by `tickvals`. Only has
    an effect when `tickmode` is "array"; used with `tickvals`.

    The 'ticktext' property is an array, specified as a tuple,
    list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["ticktext"]

@ticktext.setter
def ticktext(self, value):
    self["ticktext"] = value
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
    """
    Chart Studio Cloud source reference for ticktext.

    The 'ticktextsrc' property must be a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, value):
    self["ticktextsrc"] = value
# tickvals
# --------
@property
def tickvals(self):
    """
    Values at which ticks on this axis appear. Only has an effect
    when `tickmode` is "array"; used with `ticktext`.

    The 'tickvals' property is an array, specified as a tuple,
    list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, value):
    self["tickvals"] = value
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
    """
    Chart Studio Cloud source reference for tickvals.

    The 'tickvalssrc' property must be a string or a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, value):
    self["tickvalssrc"] = value
# tickwidth
# ---------
@property
def tickwidth(self):
    """
    Tick width (in px).

    The 'tickwidth' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, value):
    self["tickwidth"] = value
# title
# -----
@property
def title(self):
    """
    The color bar's title.

    The 'title' property may be specified as an instance of
    :class:`plotly.graph_objs.parcoords.line.colorbar.Title` or as
    a dict passed to the Title constructor. Supported dict keys:
      - font: the title font (formerly set via the deprecated
        `titlefont` attribute)
      - side: location of the title relative to the color bar
        (formerly set via the deprecated `titleside` attribute)
      - text: the title text (formerly the `title` attribute was
        the text itself; that behavior is deprecated)

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, value):
    self["title"] = value
# titlefont
# ---------
@property
def titlefont(self):
    """
    Deprecated: use parcoords.line.colorbar.title.font instead.
    Sets the color bar's title font (formerly the `titlefont`
    attribute).

    The 'font' property may be specified as an instance of
    :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`
    or as a dict passed to the Font constructor; supported keys
    are 'color', 'family' (CSS font-family preference list; Chart
    Studio Cloud servers only support a limited set such as
    "Arial", "Courier New", "Open Sans", "Times New Roman", ...)
    and 'size'.

    Returns
    -------

    """
    return self["titlefont"]

@titlefont.setter
def titlefont(self, value):
    self["titlefont"] = value
# titleside
# ---------
@property
def titleside(self):
    """
    Deprecated: use parcoords.line.colorbar.title.side instead.
    Determines the location of the color bar's title relative to
    the color bar (formerly the `titleside` attribute).

    The 'side' property is an enumeration; one of:
        ['right', 'top', 'bottom']

    Returns
    -------

    """
    return self["titleside"]

@titleside.setter
def titleside(self, value):
    self["titleside"] = value
# x
# -
@property
def x(self):
    """
    Horizontal position of the color bar (in plot fraction).

    The 'x' property is a number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, value):
    self["x"] = value
# xanchor
# -------
@property
def xanchor(self):
    """
    Horizontal anchor of the color bar: binds the `x` position to
    the "left", "center" or "right" of the bar.

    The 'xanchor' property is an enumeration; one of:
        ['left', 'center', 'right']

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, value):
    self["xanchor"] = value
# xpad
# ----
@property
def xpad(self):
    """
    Padding (in px) along the x direction.

    The 'xpad' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, value):
    self["xpad"] = value
# y
# -
@property
def y(self):
    """
    Vertical position of the color bar (in plot fraction).

    The 'y' property is a number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, value):
    self["y"] = value
# yanchor
# -------
@property
def yanchor(self):
    """
    Vertical anchor of the color bar: binds the `y` position to
    the "top", "middle" or "bottom" of the bar.

    The 'yanchor' property is an enumeration; one of:
        ['top', 'middle', 'bottom']

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, value):
    self["yanchor"] = value
# ypad
# ----
@property
def ypad(self):
    """
    Padding (in px) along the y direction.

    The 'ypad' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, value):
    self["ypad"] = value
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text description of every colorbar property; the body is a
    # single runtime string literal consumed by the constructor's help
    # text, so its content must not be reworded or reformatted.
    return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format. And for
            dates see: https://github.com/d3/d3-time-
            format#locale_format. We add two items to d3's date
            formatter: "%h" for half of the year as a decimal
            number as well as "%{n}f" for fractional seconds with n
            digits. For example, *2016-10-13 09:15:23.456* with
            tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.parcoords.line.
            colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.parcoo
            rds.line.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            parcoords.line.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn.
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.parcoords.line.colorbar.Ti
            tle` instance or dict with compatible properties
        titlefont
            Deprecated: Please use
            parcoords.line.colorbar.title.font instead. Sets this
            color bar's title font. Note that the title's font used
            to be set by the now deprecated `titlefont` attribute.
        titleside
            Deprecated: Please use
            parcoords.line.colorbar.title.side instead. Determines
            the location of color bar's title with respect to the
            color bar. Note that the title's location used to be
            set by the now deprecated `titleside` attribute.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
# Maps each deprecated flat attribute name to the nested (compound)
# property path that replaces it.
_mapped_properties = {
    "titlefont": ("title", "font"),
    "titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.line.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.line.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit |
jonasjberg/autonameow | autonameow/vendor/dateutil/parser/_parser.py | 8 | 57607 | # -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
<http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- `CPAN ParseDate module
<http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals
import datetime
import re
import string
import time
import warnings
from calendar import monthrange
from io import StringIO
import six
from six import binary_type, integer_types, text_type
from decimal import Decimal
from warnings import warn
from .. import relativedelta
from .. import tz
__all__ = ["parse", "parserinfo"]
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
    """Lexer that splits a date/time string into tokens.

    Tokens are maximal runs of letters, runs of digits (possibly with a
    decimal point), single whitespace characters, or single punctuation
    characters.  Instances are iterable and yield tokens one at a time.
    """

    # Fractional seconds are sometimes split by a comma
    _split_decimal = re.compile("([.,])")

    def __init__(self, instream):
        # Accept a text string, a bytes-like object (decoded with the
        # default codec), or any object with a `read` method.
        if six.PY2:
            # In Python 2, we can't duck type properly because unicode has
            # a 'decode' function, and we'd be double-decoding
            if isinstance(instream, (binary_type, bytearray)):
                instream = instream.decode()
        else:
            if getattr(instream, 'decode', None) is not None:
                instream = instream.decode()

        if isinstance(instream, text_type):
            instream = StringIO(instream)
        elif getattr(instream, 'read', None) is None:
            raise TypeError('Parser must be a string or character stream, not '
                            '{itype}'.format(itype=instream.__class__.__name__))

        self.instream = instream
        # Pushback stacks: characters read past the end of the current
        # token, and tokens produced ahead of time by dot-splitting.
        self.charstack = []
        self.tokenstack = []
        self.eof = False

    def get_token(self):
        """
        This function breaks the time string into lexical units (tokens), which
        can be parsed by the parser. Lexical units are demarcated by changes in
        the character set, so any continuous string of letters is considered
        one unit, any continuous string of numbers is considered one unit.
        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking it into tokens; as such, this
        function maintains a "token stack", for when the ambiguous context
        demands that multiple tokens be parsed at once.
        """
        # Serve any tokens queued up by an earlier dot-split first.
        if self.tokenstack:
            return self.tokenstack.pop(0)

        seenletters = False
        token = None
        # State machine: None = no token started, 'a' = word,
        # '0' = number, 'a.' = word containing dots, '0.' = number
        # containing dots/commas.
        state = None

        while not self.eof:
            # We only realize that we've reached the end of a token when we
            # find a character that's not part of the current token - since
            # that character may be part of the next token, it's stored in the
            # charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                # Skip NUL bytes embedded in the stream.
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)

            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character of the token - determines if we're starting
                # to parse a word, a number or something else.
                token = nextchar
                if self.isword(nextchar):
                    state = 'a'
                elif self.isnum(nextchar):
                    state = '0'
                elif self.isspace(nextchar):
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                # If we've already started reading a word, we keep reading
                # letters until we find something that's not part of a word.
                seenletters = True
                if self.isword(nextchar):
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                # If we've already started reading a number, we keep reading
                # numbers until we find something that doesn't fit.
                if self.isnum(nextchar):
                    token += nextchar
                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                # If we've seen some letters and a dot separator, continue
                # parsing, and the tokens will be broken up later.
                seenletters = True
                if nextchar == '.' or self.isword(nextchar):
                    token += nextchar
                elif self.isnum(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                # If we've seen at least one dot separator, keep going, we'll
                # break up the tokens later.
                if nextchar == '.' or self.isnum(nextchar):
                    token += nextchar
                elif self.isword(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token

        # A dotted token that contained letters, multiple dots, or a
        # trailing separator is really several tokens (e.g. "Sep.20."):
        # split on the separators and queue the pieces on the tokenstack.
        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                       token[-1] in '.,')):
            l = self._split_decimal.split(token)
            token = l[0]
            for tok in l[1:]:
                if tok:
                    self.tokenstack.append(tok)

        # A pure number with a single comma is a decimal written in the
        # comma-as-decimal-point convention; normalize it to a dot.
        if state == '0.' and token.count('.') == 0:
            token = token.replace(',', '.')

        return token

    def __iter__(self):
        return self

    def __next__(self):
        # Iterator protocol: stop when the stream is exhausted.
        token = self.get_token()
        if token is None:
            raise StopIteration

        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    @classmethod
    def split(cls, s):
        """Return the full list of tokens for string ``s``."""
        return list(cls(s))

    @classmethod
    def isword(cls, nextchar):
        """ Whether or not the next character is part of a word """
        return nextchar.isalpha()

    @classmethod
    def isnum(cls, nextchar):
        """ Whether the next character is part of a number """
        return nextchar.isdigit()

    @classmethod
    def isspace(cls, nextchar):
        """ Whether the next character is whitespace """
        return nextchar.isspace()
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
    """
    Class which handles what inputs are accepted. Subclass this to customize
    the language and acceptable values for each parameter.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. Default is ``False``.

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        Default is ``False``.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]

    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),     # TODO: "Tues"
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),    # TODO: "Thurs"
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),      # TODO: "Febr"
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z"]
    PERTAIN = ["of"]
    # Mapping of time zone name -> UTC offset in seconds; empty by
    # default, meant to be populated by subclasses.
    TZOFFSET = {}
    # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
    #              "Anno Domini", "Year of Our Lord"]

    def __init__(self, dayfirst=False, yearfirst=False):
        # Each _convert result maps a lowercased synonym to the index of
        # the group it belongs to in the corresponding class constant.
        self._jump = self._convert(self.JUMP)
        self._weekdays = self._convert(self.WEEKDAYS)
        self._months = self._convert(self.MONTHS)
        self._hms = self._convert(self.HMS)
        self._ampm = self._convert(self.AMPM)
        self._utczone = self._convert(self.UTCZONE)
        self._pertain = self._convert(self.PERTAIN)

        self.dayfirst = dayfirst
        self.yearfirst = yearfirst

        # Current year/century, used to expand two-digit years.
        self._year = time.localtime().tm_year
        self._century = self._year // 100 * 100

    def _convert(self, lst):
        """Build a {lowercased-name: group-index} lookup from ``lst``,
        whose entries may be plain strings or tuples of synonyms."""
        dct = {}
        for i, v in enumerate(lst):
            if isinstance(v, tuple):
                for v in v:
                    dct[v.lower()] = i
            else:
                dct[v.lower()] = i
        return dct

    def jump(self, name):
        """Whether ``name`` is a separator/filler token to skip."""
        return name.lower() in self._jump

    def weekday(self, name):
        """Return the weekday index (Mon=0) for ``name``, or None."""
        try:
            return self._weekdays[name.lower()]
        except KeyError:
            pass
        return None

    def month(self, name):
        """Return the 1-based month number for ``name``, or None."""
        try:
            return self._months[name.lower()] + 1
        except KeyError:
            pass
        return None

    def hms(self, name):
        """Return 0/1/2 for an hour/minute/second unit word, or None."""
        try:
            return self._hms[name.lower()]
        except KeyError:
            return None

    def ampm(self, name):
        """Return 0 for an AM token, 1 for a PM token, or None."""
        try:
            return self._ampm[name.lower()]
        except KeyError:
            return None

    def pertain(self, name):
        """Whether ``name`` is a pertain word (e.g. "of" in "3rd of May")."""
        return name.lower() in self._pertain

    def utczone(self, name):
        """Whether ``name`` (case-insensitively) names UTC."""
        return name.lower() in self._utczone

    def tzoffset(self, name):
        """Return the UTC offset in seconds for time zone ``name``, or None.

        NOTE(review): unlike :meth:`utczone`, the ``_utczone`` membership
        test here does not lowercase ``name`` first, so only names whose
        lowercase form equals ``name`` match -- confirm this asymmetry
        is intentional before relying on it.
        """
        if name in self._utczone:
            return 0

        return self.TZOFFSET.get(name)

    def convertyear(self, year, century_specified=False):
        """
        Converts two-digit years to year within [-50, 49]
        range of self._year (current local time)
        """

        # Function contract is that the year is always positive
        assert year >= 0

        if year < 100 and not century_specified:
            # assume current century to start
            year += self._century

            if year >= self._year + 50:  # if too far in future
                year -= 100
            elif year < self._year - 50:  # if too far in past
                year += 100

        return year

    def validate(self, res):
        """Normalize a parse result in place; always returns True."""
        # move to info
        if res.year is not None:
            res.year = self.convertyear(res.year, res.century_specified)

        # Python precedence parses this condition as:
        #   (tzoffset == 0 and not tzname) or (tzname == 'Z')
        # i.e. a bare "Z" or a zero offset with no name becomes UTC.
        if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            res.tzoffset = 0
        return True
class _ymd(list):
    """List of up to three numeric year/month/day candidates.

    Alongside the raw values, it tracks which list positions (if any)
    have been positively identified as the year (``ystridx``), month
    (``mstridx``) or day (``dstridx``), plus whether any value was
    written with an explicit century (e.g. "1999" vs "99").
    """

    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.century_specified = False
        # Indices into this list of the identified day/month/year
        # members, or None while the member is still ambiguous.
        self.dstridx = None
        self.mstridx = None
        self.ystridx = None

    @property
    def has_year(self):
        return self.ystridx is not None

    @property
    def has_month(self):
        return self.mstridx is not None

    @property
    def has_day(self):
        return self.dstridx is not None

    def could_be_day(self, value):
        """Whether ``value`` could still serve as the day component,
        given which other components have already been identified."""
        if self.has_day:
            return False
        elif not self.has_month:
            return 1 <= value <= 31
        elif not self.has_year:
            # Be permissive, assume leapyear
            month = self[self.mstridx]
            return 1 <= value <= monthrange(2000, month)[1]
        else:
            month = self[self.mstridx]
            year = self[self.ystridx]
            return 1 <= value <= monthrange(year, month)[1]

    def append(self, val, label=None):
        """Append a value, optionally labelled 'Y', 'M' or 'D'.

        String values longer than two digits, or ints above 100, are
        forced to be the year (and mark the century as explicit).
        """
        if hasattr(val, '__len__'):
            if val.isdigit() and len(val) > 2:
                self.century_specified = True
                if label not in [None, 'Y']:  # pragma: no cover
                    raise ValueError(label)
                label = 'Y'
        elif val > 100:
            self.century_specified = True
            if label not in [None, 'Y']:  # pragma: no cover
                raise ValueError(label)
            label = 'Y'

        super(self.__class__, self).append(int(val))

        if label == 'M':
            if self.has_month:
                raise ValueError('Month is already set')
            self.mstridx = len(self) - 1
        elif label == 'D':
            if self.has_day:
                raise ValueError('Day is already set')
            self.dstridx = len(self) - 1
        elif label == 'Y':
            if self.has_year:
                raise ValueError('Year is already set')
            self.ystridx = len(self) - 1

    def _resolve_from_stridxs(self, strids):
        """
        Try to resolve the identities of year/month/day elements using
        ystridx, mstridx, and dstridx, if enough of these are specified.
        """
        if len(self) == 3 and len(strids) == 2:
            # we can back out the remaining stridx value
            missing = [x for x in range(3) if x not in strids.values()]
            key = [x for x in ['y', 'm', 'd'] if x not in strids]
            assert len(missing) == len(key) == 1
            key = key[0]
            val = missing[0]
            strids[key] = val

        assert len(self) == len(strids)  # otherwise this should not be called
        out = {key: self[strids[key]] for key in strids}
        return (out.get('y'), out.get('m'), out.get('d'))

    def resolve_ymd(self, yearfirst, dayfirst):
        """Return ``(year, month, day)`` resolved from the collected
        values, using ``yearfirst``/``dayfirst`` to break ambiguities;
        unresolvable members come back as None."""
        len_ymd = len(self)
        year, month, day = (None, None, None)

        strids = (('y', self.ystridx),
                  ('m', self.mstridx),
                  ('d', self.dstridx))

        strids = {key: val for key, val in strids if val is not None}
        if (len(self) == len(strids) > 0 or
                (len(self) == 3 and len(strids) == 2)):
            return self._resolve_from_stridxs(strids)

        mstridx = self.mstridx

        if len_ymd > 3:
            raise ValueError("More than three YMD values")
        elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
            # One member, or two members with a month string
            if mstridx is not None:
                month = self[mstridx]
                # since mstridx is 0 or 1, self[mstridx-1] always
                # looks up the other element
                other = self[mstridx - 1]
            else:
                other = self[0]

            if len_ymd > 1 or mstridx is None:
                if other > 31:
                    year = other
                else:
                    day = other
        elif len_ymd == 2:
            # Two members with numbers
            if self[0] > 31:
                # 99-01
                year, month = self
            elif self[1] > 31:
                # 01-99
                month, year = self
            elif dayfirst and self[1] <= 12:
                # 13-01
                day, month = self
            else:
                # 01-13
                month, day = self

        elif len_ymd == 3:
            # Three members
            if mstridx == 0:
                if self[1] > 31:
                    # Apr-2003-25
                    month, year, day = self
                else:
                    month, day, year = self
            elif mstridx == 1:
                if self[0] > 31 or (yearfirst and self[2] <= 31):
                    # 99-Jan-01
                    year, month, day = self
                else:
                    # 01-Jan-01
                    # Give precedence to day-first, since
                    # two-digit years is usually hand-written.
                    day, month, year = self
            elif mstridx == 2:
                # Month comes last -- an unusual ordering.
                if self[1] > 31:
                    # 01-99-Jan
                    day, year, month = self
                else:
                    # 99-01-Jan
                    year, day, month = self
            else:
                if (self[0] > 31 or
                    self.ystridx == 0 or
                        (yearfirst and self[1] <= 12 and self[2] <= 31)):
                    # 99-01-01
                    if dayfirst and self[2] <= 12:
                        year, day, month = self
                    else:
                        year, month, day = self
                elif self[0] > 12 or (dayfirst and self[1] <= 12):
                    # 13-01-01
                    day, month, year = self
                else:
                    # 01-13-01
                    month, day, year = self

        return year, month, day
class parser(object):
    def __init__(self, info=None):
        """Create a parser using *info* (a :class:`parserinfo`) to
        classify tokens; defaults to a fresh ``parserinfo()``."""
        self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None, **kwargs):
"""
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param \\*\\*kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if default is None:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res, skipped_tokens = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("Unknown string format:", timestr)
if len(res) == 0:
raise ValueError("String does not contain a date:", timestr)
ret = self._build_naive(res, default)
if not ignoretz:
ret = self._build_tzaware(ret, res, tzinfos)
if kwargs.get('fuzzy_with_tokens', False):
return ret, skipped_tokens
else:
return ret
class _result(_resultbase):
    """Mutable bag of date/time components filled in by ``_parse``.

    Any field left as ``None`` was not found in the input string.
    # NOTE(review): "any_unused_tokens" appears to be set by the tz
    # parsing helpers rather than by _parse itself — confirm usage.
    """
    __slots__ = ["year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond",
                 "tzname", "tzoffset", "ampm", "any_unused_tokens"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
           fuzzy_with_tokens=False):
    """
    Private method which performs the heavy lifting of parsing, called from
    ``parse()``, which passes on its ``kwargs`` to this function.

    :param timestr:
        The string to parse.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. If set to ``None``, this value is retrieved from the
        current :class:`parserinfo` object (which itself defaults to
        ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        If this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        A ``(res, skipped_tokens)`` pair: ``res`` is a ``_result`` (or
        ``None`` if parsing/validation failed), and ``skipped_tokens`` is
        a tuple of ignored substrings when ``fuzzy_with_tokens`` is set,
        otherwise ``None``.
    """
    if fuzzy_with_tokens:
        fuzzy = True

    info = self.info

    if dayfirst is None:
        dayfirst = info.dayfirst

    if yearfirst is None:
        yearfirst = info.yearfirst

    res = self._result()
    l = _timelex.split(timestr)         # Splits the timestr into tokens

    # Indices of tokens that fuzzy parsing chose to ignore.
    skipped_idxs = []

    # year/month/day list
    ymd = _ymd()

    len_l = len(l)
    i = 0
    try:
        while i < len_l:

            # Check if it's a number
            value_repr = l[i]
            try:
                value = float(value_repr)
            except ValueError:
                value = None

            if value is not None:
                # Numeric token
                i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)

            # Check weekday
            elif info.weekday(l[i]) is not None:
                value = info.weekday(l[i])
                res.weekday = value

            # Check month name
            elif info.month(l[i]) is not None:
                value = info.month(l[i])
                ymd.append(value, 'M')

                if i + 1 < len_l:
                    if l[i + 1] in ('-', '/'):
                        # Jan-01[-99]
                        sep = l[i + 1]
                        ymd.append(l[i + 2])

                        if i + 3 < len_l and l[i + 3] == sep:
                            # Jan-01-99
                            ymd.append(l[i + 4])
                            i += 2

                        i += 2

                    elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
                          info.pertain(l[i + 2])):
                        # Jan of 01
                        # In this case, 01 is clearly year
                        if l[i + 4].isdigit():
                            # Convert it here to become unambiguous
                            value = int(l[i + 4])
                            year = str(info.convertyear(value))
                            ymd.append(year, 'Y')
                        else:
                            # Wrong guess
                            pass
                            # TODO: not hit in tests
                        i += 4

            # Check am/pm
            elif info.ampm(l[i]) is not None:
                value = info.ampm(l[i])
                val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)

                if val_is_ampm:
                    res.hour = self._adjust_ampm(res.hour, value)
                    res.ampm = value

                elif fuzzy:
                    skipped_idxs.append(i)

            # Check for a timezone name
            elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
                res.tzname = l[i]
                res.tzoffset = info.tzoffset(res.tzname)

                # Check for something like GMT+3, or BRST+3. Notice
                # that it doesn't mean "I am 3 hours after GMT", but
                # "my time +3 is GMT". If found, we reverse the
                # logic so that timezone parsing code will get it
                # right.
                if i + 1 < len_l and l[i + 1] in ('+', '-'):
                    l[i + 1] = ('+', '-')[l[i + 1] == '+']
                    res.tzoffset = None
                    if info.utczone(res.tzname):
                        # With something like GMT+3, the timezone
                        # is *not* GMT.
                        res.tzname = None

            # Check for a numbered timezone
            elif res.hour is not None and l[i] in ('+', '-'):
                signal = (-1, 1)[l[i] == '+']
                len_li = len(l[i + 1])

                # TODO: check that l[i + 1] is integer?
                if len_li == 4:
                    # -0300
                    hour_offset = int(l[i + 1][:2])
                    min_offset = int(l[i + 1][2:])
                elif i + 2 < len_l and l[i + 2] == ':':
                    # -03:00
                    hour_offset = int(l[i + 1])
                    min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
                    i += 2
                elif len_li <= 2:
                    # -[0]3
                    hour_offset = int(l[i + 1][:2])
                    min_offset = 0
                else:
                    raise ValueError(timestr)

                res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)

                # Look for a timezone name between parenthesis
                if (i + 5 < len_l and
                        info.jump(l[i + 2]) and l[i + 3] == '(' and
                        l[i + 5] == ')' and
                        3 <= len(l[i + 4]) and
                        self._could_be_tzname(res.hour, res.tzname,
                                              None, l[i + 4])):
                    # -0300 (BRST)
                    res.tzname = l[i + 4]
                    i += 4

                i += 1

            # Check jumps
            elif not (info.jump(l[i]) or fuzzy):
                raise ValueError(timestr)

            else:
                skipped_idxs.append(i)
            i += 1

        # Process year/month/day
        year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)

        res.century_specified = ymd.century_specified
        res.year = year
        res.month = month
        res.day = day

    except (IndexError, ValueError):
        # Any token-stream misstep means the string was not parseable.
        return None, None

    if not info.validate(res):
        return None, None

    if fuzzy_with_tokens:
        skipped_tokens = self._recombine_skipped(l, skipped_idxs)
        return res, tuple(skipped_tokens)
    else:
        return res, None
def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
    """Consume the numeric token at ``tokens[idx]`` (plus any tokens it
    implies, such as separators, h/m/s labels or AM/PM markers), updating
    ``ymd`` and ``res`` in place.

    :return: the index of the last token consumed.
    :raises ValueError: if the token cannot be interpreted and ``fuzzy``
        is not set.
    """
    # Token is a number
    value_repr = tokens[idx]
    try:
        value = self._to_decimal(value_repr)
    except Exception as e:
        six.raise_from(ValueError('Unknown numeric token'), e)

    len_li = len(value_repr)

    len_l = len(tokens)

    if (len(ymd) == 3 and len_li in (2, 4) and
        res.hour is None and
            (idx + 1 >= len_l or
             (tokens[idx + 1] != ':' and
              info.hms(tokens[idx + 1]) is None))):
        # 19990101T23[59]
        s = tokens[idx]
        res.hour = int(s[:2])

        if len_li == 4:
            res.minute = int(s[2:])

    elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
        # YYMMDD or HHMMSS[.ss]
        s = tokens[idx]

        if not ymd and '.' not in tokens[idx]:
            ymd.append(s[:2])
            ymd.append(s[2:4])
            ymd.append(s[4:])
        else:
            # 19990101T235959[.59]

            # TODO: Check if res attributes already set.
            res.hour = int(s[:2])
            res.minute = int(s[2:4])
            res.second, res.microsecond = self._parsems(s[4:])

    elif len_li in (8, 12, 14):
        # YYYYMMDD
        s = tokens[idx]
        ymd.append(s[:4], 'Y')
        ymd.append(s[4:6])
        ymd.append(s[6:8])

        if len_li > 8:
            res.hour = int(s[8:10])
            res.minute = int(s[10:12])

            if len_li > 12:
                res.second = int(s[12:])

    elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
        # HH[ ]h or MM[ ]m or SS[.ss][ ]s
        hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
        (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
        if hms is not None:
            # TODO: checking that hour/minute/second are not
            # already set?
            self._assign_hms(res, value_repr, hms)

    elif idx + 2 < len_l and tokens[idx + 1] == ':':
        # HH:MM[:SS[.ss]]
        res.hour = int(value)
        value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
        (res.minute, res.second) = self._parse_min_sec(value)

        if idx + 4 < len_l and tokens[idx + 3] == ':':
            res.second, res.microsecond = self._parsems(tokens[idx + 4])

            idx += 2

        idx += 2

    elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
        # Separated date, e.g. 01-01, 01/Jan/99
        sep = tokens[idx + 1]
        ymd.append(value_repr)

        if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
            if tokens[idx + 2].isdigit():
                # 01-01[-01]
                ymd.append(tokens[idx + 2])
            else:
                # 01-Jan[-01]
                value = info.month(tokens[idx + 2])

                if value is not None:
                    ymd.append(value, 'M')
                else:
                    raise ValueError()

            if idx + 3 < len_l and tokens[idx + 3] == sep:
                # We have three members
                value = info.month(tokens[idx + 4])

                if value is not None:
                    ymd.append(value, 'M')
                else:
                    ymd.append(tokens[idx + 4])
                idx += 2

            idx += 1
        idx += 1

    elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
        if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
            # 12 am
            hour = int(value)
            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
            idx += 1
        else:
            # Year, month or day
            ymd.append(value)
        idx += 1

    elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
        # 12am
        hour = int(value)
        res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
        idx += 1

    elif ymd.could_be_day(value):
        ymd.append(value)

    elif not fuzzy:
        raise ValueError()

    return idx
def _find_hms_idx(self, idx, tokens, info, allow_jump):
len_l = len(tokens)
if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
# There is an "h", "m", or "s" label following this token. We take
# assign the upcoming label to the current token.
# e.g. the "12" in 12h"
hms_idx = idx + 1
elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
info.hms(tokens[idx+2]) is not None):
# There is a space and then an "h", "m", or "s" label.
# e.g. the "12" in "12 h"
hms_idx = idx + 2
elif idx > 0 and info.hms(tokens[idx-1]) is not None:
# There is a "h", "m", or "s" preceeding this token. Since neither
# of the previous cases was hit, there is no label following this
# token, so we use the previous label.
# e.g. the "04" in "12h04"
hms_idx = idx-1
elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
info.hms(tokens[idx-2]) is not None):
# If we are looking at the final token, we allow for a
# backward-looking check to skip over a space.
# TODO: Are we sure this is the right condition here?
hms_idx = idx - 2
else:
hms_idx = None
return hms_idx
def _assign_hms(self, res, value_repr, hms):
# See GH issue #427, fixing float rounding
value = self._to_decimal(value_repr)
if hms == 0:
# Hour
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif hms == 1:
(res.minute, res.second) = self._parse_min_sec(value)
elif hms == 2:
(res.second, res.microsecond) = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
return (hour is not None and
tzname is None and
tzoffset is None and
len(token) <= 5 and
all(x in string.ascii_uppercase for x in token))
def _ampm_valid(self, hour, ampm, fuzzy):
"""
For fuzzy parsing, 'a' or 'am' (both valid English words)
may erroneously trigger the AM/PM flag. Deal with that
here.
"""
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with AM or PM flag.')
elif not 0 <= hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for 12-hour clock.')
return val_is_ampm
def _adjust_ampm(self, hour, ampm):
if hour < 12 and ampm == 1:
hour += 12
elif hour == 12 and ampm == 0:
hour = 0
return hour
def _parse_min_sec(self, value):
# TODO: Every usage of this function sets res.second to the return
# value. Are there any cases where second will be returned as None and
# we *dont* want to set res.second = None?
minute = int(value)
second = None
sec_remainder = value % 1
if sec_remainder:
second = int(60 * sec_remainder)
return (minute, second)
def _parsems(self, value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
def _parse_hms(self, idx, tokens, info, hms_idx):
# TODO: Is this going to admit a lot of false-positives for when we
# just happen to have digits and "h", "m" or "s" characters in non-date
# text? I guess hex hashes won't have that problem, but there's plenty
# of random junk out there.
if hms_idx is None:
hms = None
new_idx = idx
elif hms_idx > idx:
hms = info.hms(tokens[hms_idx])
new_idx = hms_idx
else:
# Looking backwards, increment one.
hms = info.hms(tokens[hms_idx]) + 1
new_idx = idx
return (new_idx, hms)
def _recombine_skipped(self, tokens, skipped_idxs):
"""
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
>>> skipped_idxs = [0, 1, 2, 5]
>>> _recombine_skipped(tokens, skipped_idxs)
["foo bar", "baz"]
"""
skipped_tokens = []
for i, idx in enumerate(sorted(skipped_idxs)):
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
else:
skipped_tokens.append(tokens[idx])
return skipped_tokens
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
if callable(tzinfos):
tzdata = tzinfos(tzname, tzoffset)
else:
tzdata = tzinfos.get(tzname)
# handle case where tzinfo is paased an options that returns None
# eg tzinfos = {'BRST' : None}
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(tzname, tzdata)
return tzinfo
def _build_tzaware(self, naive, res, tzinfos):
    """Attach time zone information from *res*/*tzinfos* to *naive*.

    :param naive: the naive :class:`datetime.datetime` built so far.
    :param res: parse result carrying ``tzname`` and ``tzoffset``.
    :param tzinfos: user-supplied mapping or callable of extra zone names.
    :return: an aware datetime, or *naive* unchanged when no usable
        time zone information was found.
    """
    # User-supplied tzinfos always win.
    if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
        tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
        aware = naive.replace(tzinfo=tzinfo)
        aware = self._assign_tzname(aware, res.tzname)

    elif res.tzname and res.tzname in time.tzname:
        # The name matches the local system zone's abbreviations.
        aware = naive.replace(tzinfo=tz.tzlocal())

        # Handle ambiguous local datetime
        aware = self._assign_tzname(aware, res.tzname)

        # This is mostly relevant for winter GMT zones parsed in the UK
        if (aware.tzname() != res.tzname and
                res.tzname in self.info.UTCZONE):
            aware = aware.replace(tzinfo=tz.tzutc())

    elif res.tzoffset == 0:
        aware = naive.replace(tzinfo=tz.tzutc())

    elif res.tzoffset:
        aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

    elif not res.tzname and not res.tzoffset:
        # i.e. no timezone information was found.
        aware = naive

    elif res.tzname:
        # tz-like string was parsed but we don't know what to do
        # with it
        warnings.warn("tzname {tzname} identified but not understood. "
                      "Pass `tzinfos` argument in order to correctly "
                      "return a timezone-aware datetime. In a future "
                      "version, this will raise an "
                      "exception.".format(tzname=res.tzname),
                      category=UnknownTimezoneWarning)
        aware = naive

    return aware
def _build_naive(self, res, default):
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back
# to the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
naive = default.replace(**repl)
if res.weekday is not None and not res.day:
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
return naive
def _assign_tzname(self, dt, tzname):
if dt.tzname() != tzname:
new_dt = tz.enfold(dt, fold=1)
if new_dt.tzname() == tzname:
return new_dt
return dt
def _to_decimal(self, val):
try:
decimal_value = Decimal(val)
# See GH 662, edge case, infinite value should not be converted via `_to_decimal`
if not decimal_value.is_finite():
raise ValueError("Converted decimal value is infinite or NaN")
except Exception as e:
msg = "Could not convert %s to decimal" % val
six.raise_from(ValueError(msg), e)
else:
return decimal_value
# Shared module-level parser instance used by the convenience parse() function.
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
    """
    Parse a string in one of the supported formats, using the
    ``parserinfo`` parameters.

    :param timestr:
        A string containing a date/time stamp.

    :param parserinfo:
        A :class:`parserinfo` object containing parameters for the parser.
        If ``None``, the module-level default parser (built with the
        default :class:`parserinfo` arguments) is used.

    The ``**kwargs`` parameter takes the following keyword arguments:

    :param default:
        The default datetime object; if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in
        the default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a
        naive :class:`datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. Either a dictionary mapping time zone names to time zones
        (an integer offset from UTC in seconds, or a :class:`tzinfo`
        object), or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone. This parameter is
        ignored if ``ignoretz`` is set.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer
        date (e.g. 01/05/09) as the day (``True``) or month (``False``).
        If ``yearfirst`` is set to ``True``, this distinguishes between
        YDM and YMD. If ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer
        date (e.g. 01/05/09) as the year. If ``True``, the first number
        is taken to be the year, otherwise the last number is. If
        ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the
        parser will return a tuple of the parsed
        :class:`datetime.datetime` and a tuple containing the portions of
        the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        A :class:`datetime.datetime` object or, if ``fuzzy_with_tokens``
        is ``True``, a tuple of that object and a tuple of the fuzzy
        tokens.

    :raises ValueError:
        Raised for invalid or unknown string format, if the provided
        :class:`tzinfo` is not in a valid format, or if an invalid date
        would be created.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    # Either build a one-off parser around the supplied parserinfo, or
    # delegate to the shared module-level instance.
    if parserinfo:
        return parser(parserinfo).parse(timestr, **kwargs)
    return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
    """Parser for ``TZ``-environment-variable-style time zone strings
    (e.g. ``"EST5EDT"`` with optional DST transition rules), plus a
    deprecated comma-separated dateutil-specific format.
    """

    class _result(_resultbase):
        # Standard/DST abbreviations and offsets, plus the DST start/end
        # transition rules.
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One DST transition point; exactly one of the day-selection
            # fields (month/week/weekday, yday, jyday, day) is used.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Parse *tzstr* and return a ``_result``, or ``None`` on any
        failure (malformed input is reported by returning ``None``, not
        by raising).
        """
        res = self._result()
        # Tokenize into separators, alphabetic runs, and digit runs.
        l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
        used_idxs = list()
        try:
            len_l = len(l)

            i = 0
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                while j < len_l and not [x for x in l[j]
                                         if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])

                    for ii in range(j):
                        used_idxs.append(ii)
                    i = j
                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
                                       "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right. See the TZ variable
                            # documentation.
                            signal = (1, -1)[l[i] == '+']
                            used_idxs.append(i)
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr, (int(l[i][:2]) * 3600 +
                                                   int(l[i][2:]) * 60) * signal)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i]) * 3600 +
                                     int(l[i + 2]) * 60) * signal)
                            used_idxs.append(i)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2]) * 3600 * signal)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break

            if i < len_l:
                # Normalize ';' separators to ',' before rule parsing.
                for j in range(i, len_l):
                    if l[j] == ';':
                        l[j] = ','

                assert l[i] == ','

                i += 1

            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789+-"]):
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    if l[i] == '-':
                        value = int(l[i + 1]) * -1
                        used_idxs.append(i)
                        i += 1
                    else:
                        value = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    if value:
                        x.week = value
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        x.day = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    x.time = int(l[i])
                    used_idxs.append(i)
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        used_idxs.append(i)
                        i += 1
                    else:
                        signal = 1
                    used_idxs.append(i)
                    res.dstoffset = (res.stdoffset + int(l[i]) * signal)

                # This was a made-up format that is not in normal use
                warn(('Parsed time zone "%s"' % tzstr) +
                     'is in a non-standard dateutil-specific format, which ' +
                     'is now deprecated; support for parsing this format ' +
                     'will be removed in future versions. It is recommended ' +
                     'that you switch to a standard format like the GNU ' +
                     'TZ variable format.', tz.DeprecatedTzFormatWarning)
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                     '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                # GNU TZ rule format: two rules (start, end), each
                # Jn | Mm.w.d | n, optionally followed by /time.
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        used_idxs.append(i)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        used_idxs.append(i)
                        i += 1
                        x.month = int(l[i])
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            # Week 5 means "last week of the month".
                            x.week = -1
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i]) + 1

                    used_idxs.append(i)
                    i += 1
                    if i < len_l and l[i] == '/':
                        used_idxs.append(i)
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2]) * 3600 +
                                      int(l[i][2:]) * 60)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
                            used_idxs.append(i)
                            i += 2
                            if i + 1 < len_l and l[i + 1] == ':':
                                used_idxs.append(i)
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2]) * 3600)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1

                    assert i == len_l or l[i] == ','

                    i += 1

                assert i >= len_l

        except (IndexError, ValueError, AssertionError):
            return None

        # Flag leftover tokens beyond bare separators so callers can tell
        # whether the whole string was consumed.
        unused_idxs = set(range(len_l)).difference(used_idxs)
        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
        return res
# Shared module-level _tzparser instance used by _parsetz().
DEFAULTTZPARSER = _tzparser()


def _parsetz(tzstr):
    """Parse a TZ-style time zone string with the shared module-level
    :class:`_tzparser`; returns its result object or ``None`` on failure."""
    return DEFAULTTZPARSER.parse(tzstr)
class UnknownTimezoneWarning(RuntimeWarning):
    """Raised when the parser finds a timezone it cannot parse into a tzinfo.

    Note: despite the wording, this is a warning category issued via
    ``warnings.warn`` (see ``_build_tzaware``), not an exception type.
    """
# vim:ts=4:sw=4:et
| gpl-2.0 |
stevetjoa/musicsearch | mirgui.py | 1 | 40317 |
import os
import pprint
import random
import wx
import numpy
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import mir2
import mir
from matplotlib.colors import LogNorm
import main
class mirgui(wx.Frame):
""" The main frame of the application
"""
title = 'Music Search Application'
def __init__(self):
    """Create the main frame, build the menu/status bar/panel, draw the
    initial figure, and index every training file for music search.
    """
    wx.Frame.__init__(self, None, -1, self.title)

    self.data = [5, 6, 9, 14]

    self.create_menu()
    self.create_status_bar()
    self.create_main_panel()

    #self.textbox.SetValue(' '.join(map(str, self.data)))
    self.draw_figure()

    # Build the search index from every file in ./train.
    # NOTE(review): this runs synchronously in the constructor and will
    # block the UI until training completes — consider a worker thread.
    print 'Training.'
    self.musicsearch = main.Search(8, 32)
    for f in os.listdir('train'):
        print f
        x, fs, enc = mir.wavread('train/'+f)
        self.musicsearch.add(x, fs, f)
    print 'Done training.'
def create_menu(self):
    """Build the File/Edit/Help menu bar and bind its event handlers.

    Menu items appear in the order they are appended, so the statement
    order below is significant.
    """
    self.menubar = wx.MenuBar()

    # --- File menu: import, key estimation, plot export, exit ---
    menu_file = wx.Menu()
    m_browse = menu_file.Append(-1, "&Import *.wav file...", "Shows a File Dialog")
    self.Bind(wx.EVT_MENU, self.openfile, m_browse)
    m_key = menu_file.Append(-1, "&Estimate Key...", "Estimates Key of the Entire wav file")
    self.Bind(wx.EVT_MENU, self.est_key, m_key)
    m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
    self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
    menu_file.AppendSeparator()
    m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
    self.Bind(wx.EVT_MENU, self.on_exit, m_exit)

    # --- Edit menu: plot parameter controls ---
    menu_edit = wx.Menu()
    m_reset = menu_edit.Append(-1, "&Reset Parameters...", "Resets plot parameters to Default Values")
    self.Bind(wx.EVT_MENU, self.on_reset, m_reset)
    m_lognorm = menu_edit.AppendCheckItem(-1, "Log-Norm", "Plot gram values using Log Normalized spectrum")
    self.Bind(wx.EVT_MENU, self.on_log_norm, m_lognorm)
    # Radio pair selecting which canvas (input or output) the plot
    # adjustments apply to.
    m_WC1 = menu_edit.Append(-1, 'Adjust Input Plot', kind=wx.ITEM_RADIO)
    self.Bind(wx.EVT_MENU,self.which_canvas1, m_WC1)
    m_WC2 = menu_edit.Append(-1, 'Adjust Output Plot', kind=wx.ITEM_RADIO)
    self.Bind(wx.EVT_MENU,self.which_canvas2, m_WC2)

    # --- Help menu ---
    menu_help = wx.Menu()
    m_about = menu_help.Append(-1, "&About\tF1", "About the demo")
    self.Bind(wx.EVT_MENU, self.on_about, m_about)

    self.menubar.Append(menu_file, "&File")
    self.menubar.Append(menu_edit, "&Edit")
    self.menubar.Append(menu_help, "&Help")
    self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel with all the controls on it:
* mpl canvas
* mpl navigation toolbar
* Control panel for interaction
"""
self.panel = wx.Panel(self)
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.canvas2= FigCanvas(self.panel, -1, self.fig)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
self.drawbutton = wx.Button(self.panel, -1, "Plot Gram")
self.Bind(wx.EVT_BUTTON, self.on_draw_button, self.drawbutton)
self.plot_select = ['Time Domain Signal', 'Spectrogram','Constant Q Spectrogram', 'Chromagram']
self.combo = wx.ComboBox(self.panel, -1, pos = (0,400), choices = self.plot_select, style=wx.ALIGN_LEFT | wx.CB_READONLY)
self.combo.SetSelection(2)
self.setbutton = wx.Button(self.panel, -1, "Set Parameters")
self.Bind(wx.EVT_BUTTON, self.on_set_button, self.setbutton)
self.record = wx.BitmapButton(self.panel, -1, wx.Bitmap('record.png'))
self.Bind(wx.EVT_BUTTON, self.on_rec, self.record)
self.play = wx.BitmapButton(self.panel, -1, wx.Bitmap('play.png'))
self.Bind(wx.EVT_BUTTON, self.on_play, self.play)
self.stop = wx.BitmapButton(self.panel, -1, wx.Bitmap('stop.png'))
self.searchbutton = wx.Button(self.panel, -1, "Search Database")
self.Bind(wx.EVT_BUTTON, self.search, self.searchbutton)
self.searchbutton1 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name="1) Sonata in A Maj., Beethoven")
self.searchbutton2 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton3 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton4 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton5 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton6 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton7 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton8 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton9 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton10 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.Sbuttonlist = [self.searchbutton1,self.searchbutton2,
self.searchbutton3,self.searchbutton4,
self.searchbutton5,self.searchbutton6,
self.searchbutton7,self.searchbutton8,
self.searchbutton9,self.searchbutton10]
self.Bind(wx.EVT_BUTTON, self.getmeta1, self.searchbutton1)
self.Bind(wx.EVT_BUTTON, self.getmeta2, self.searchbutton2)
self.Bind(wx.EVT_BUTTON, self.getmeta3, self.searchbutton3)
self.Bind(wx.EVT_BUTTON, self.getmeta4, self.searchbutton4)
self.Bind(wx.EVT_BUTTON, self.getmeta5, self.searchbutton5)
self.Bind(wx.EVT_BUTTON, self.getmeta6, self.searchbutton6)
self.Bind(wx.EVT_BUTTON, self.getmeta7, self.searchbutton7)
self.Bind(wx.EVT_BUTTON, self.getmeta8, self.searchbutton8)
self.Bind(wx.EVT_BUTTON, self.getmeta9, self.searchbutton9)
self.Bind(wx.EVT_BUTTON, self.getmeta10, self.searchbutton10)
#self.plt_titlestr = ''
#self.plot_title = wx.StaticText(self.panel, -1, 'text1',(30,15), style=wx.ALIGN_CENTRE)
# Create the navigation toolbar, tied to the canvas
#
self.toolbar = NavigationToolbar(self.canvas)
#
# Layout with box sizers
#
flags = wx.ALIGN_LEFT | wx.ALL | wx.GROW
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.vbox2 = wx.BoxSizer(wx.VERTICAL)
self.vbox3 = wx.BoxSizer(wx.VERTICAL)
self.vbox2.AddStretchSpacer(1)
self.vbox2.Add(self.searchbutton1, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton2, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton3, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton4, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton5, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton6, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton7, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton8, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton9, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton10, 0, border=3, flag=flags)
self.vbox2.AddStretchSpacer(1)
self.vbox3.Add(self.canvas, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox3.Add(self.canvas2, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.hbox2.Add(self.vbox2, 0, wx.LEFT | wx.TOP | wx.ALIGN_LEFT| wx.GROW)
#self.panel.SetSizer(self.vbox)
#self.vbox.Fit(self)
self.hbox2.Add(self.vbox3, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox.Add(self.hbox2, 0, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(7)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.AddSpacer(15)
self.hbox.Add(self.combo, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.setbutton, 0, border = 3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.drawbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.play, 0, flag = flags)
self.hbox.Add(self.stop, 0, flag = flags)
self.hbox.Add(self.record, 0, flag = flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.searchbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.BOTTOM | wx.EXPAND |wx.GROW)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.mypath = None
self.fsz = 0.040
self.hop = 0.020
self.fmax = 44100
self.x, self.fs, self.nbits = mir2.wavread('default.wav')
#self.tmax = round(float(len(self.x))/self.fs,2)
self.rectime = 20
self.tmax = self.rectime
self.tmin = 0
self.LG_flag = 0
self.LG_str = None
self.LG_vmin = 25
self.LG_vmax = 50
self.tmin_samp = None
self.tmax_samp = None
self.WC = 1
#self.rec_input = mir2.wavread('default.wav')#None
self.rec_input = None
self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
self.dict = {'Beethoven_vln_sonata5_Zukerman_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Oistrakh_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Francescatti_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Bach Vln Partita3 - Fischbach 2004 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Fischbach 2004 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Sonata1 - Fischbach 2004 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Milstein 1955 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Partita3 - Milstein 1955 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Sonata1 - Milstein 1954 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Nathan Milstein'),
'brahms_rhapsody_01.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Lili Kraus'),
'brahms_rhapsody_02.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Martha Argerich'),
'debussy_toccata.wav':
('Debussy Toccata from Pour le Piano', 'C. Debussy','N/A','Piano','Boris Feiner'),
'dont_stop_believin.wav':
('Don\'t Stop Believin\'', 'Journey','E major','Vocal, Guitar, Bass, Piano, Drums','Journey'),
'lady_madonna.wav':
('Lady Madonna', 'The Beatles','E major','Vocal, Guitar, Bass, Piano, Saxophone, Drums','The Beatles'),
'let_it_be.wav':
('Let it Be', 'The Beatles','C major','Vocal, Guitar, Bass, Piano, Drums','The Beatles'),
'moonlight.wav':
('Beethoven Piano Sonata No.14', 'L. Beethoven','C# minor','Piano','Unknown'),
'office_theme.wav':
('Theme from \'The Office\'', 'Unknown','G Major','Piano','Unknown'),
'konstantine.wav':
('Konstantine', 'Andrew McMahon','D minor','Vocal, Piano','Something Corporate'),
}
def create_status_bar(self):
    """Create the frame's status bar and keep a handle for later updates."""
    self.statusbar = self.CreateStatusBar()
def draw_figure(self, i=0):
""" Redraws the figure
"""
if self.rec_input is None:
return
if self.mypath is None:
self.mypath = 'default.wav'
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
if self.WC == 2:
path = 'train/'
filename = self.rankresults[i][0]
fullpath = path + filename
self.x, self.fs, self.nbits = mir2.wavread(fullpath)
if self.WC == 1:
self.x = self.rec_input
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
print 'storing rec_input'
self.get_plot_type()
G = 0
self.tmax = float(len(self.x))/self.fs
self.tmin_samp = int(self.tmin*self.fs)
self.tmax_samp = int(self.tmax*self.fs)
if self.tmax_samp > len(self.x):
self.tmax_samp = len(self.x) - 1
print self.x.shape, self.fs, self.fsz, self.hop
if self.plot_type == 0:
P = self.x[self.tmin_samp:self.tmax_samp]
elif self.plot_type == 1:
G = mir2.spectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 2:
G = mir2.qspectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 3:
G = mir2.chromagram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
#self.plot_titlestr = self.mypath + gramtype
self.axes.clear()
if self.plot_type == 0:
self.axes.plot(P)
elif self.plot_type == 1 or 2 or 3:
if self.LG_flag == 0:
self.LG_str = None
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower')
elif self.LG_flag == 1:
self.LG_str = 'LogNorm(vmin = 25, vmax = 50)'
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower', norm = LogNorm()) #vmin = self.LG_vmin, vmax = self.LG_vmax))
#self.WC = 1
if self.WC == 1:
self.canvas.draw()
if self.WC == 2:
self.canvas2.draw()
def which_canvas1(self, event):
    """Mark the first (input) canvas as the active draw/playback target."""
    self.WC = 1
def which_canvas2(self, event):
    """Mark the second (results) canvas as the active draw/playback target."""
    self.WC = 2
def on_draw_button(self, event):
    """Draw-button handler: refresh the selected plot type, then redraw.

    BUGFIX: the original read `self.get_plot_type` without calling it —
    a no-op attribute access.  The method is now actually invoked
    (draw_figure() also calls it, so visible behavior is unchanged).
    """
    self.get_plot_type()
    self.draw_figure()
def search(self, event):
    """Query the music-search engine with the current recording and
    show the top matches on the ten result buttons."""
    # Fixed "N) " prefixes for the result-button labels.
    self.ranklist = ['1) ','2) ','3) ','4) ','5) ','6) ','7) ','8) ','9) ','10) ']
    # NOTE(review): titlelist and the first rankresults assignment look
    # like leftover placeholder data — both are unused/overwritten below.
    self.titlelist = ['Sonata', 'Polonaise in G Min., Chopin',
    'Rondo No. 5 in C# Min., Bartok', 'Sonata in A Maj., Beethoven',
    'Polonaise in G Min., Chopin', 'Rondo No. 5 in C# Min., Bartok',
    'Sonata in A Maj., Beethoven', 'Polonaise in G Min., Chopin',
    'Rondo No. 5 in C# Min., Bartok','Rondo No. 5 in C# Min., Bartok']
    self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
    print self.rec_input.shape, self.fs
    # Clear any previous results.
    for i in range(10):
        self.Sbuttonlist[i].SetLabel('')
    # Real query: returns ranked (filename, score) pairs.
    self.rankresults = self.musicsearch.query(self.rec_input, self.fs)
    print self.rankresults
    # Map each result filename to its metadata tuple via self.dict.
    self.metalist = range(len(self.rankresults))
    for i in range(len(self.rankresults)):
        self.metalist[i] = self.dict[self.rankresults[i][0]]
    # Label at most ten buttons with "N) Title".
    for i in range(min(10, len(self.metalist))):
        self.Sbuttonlist[i].SetLabel(self.ranklist[i] + self.metalist[i][0])
    #self.create_main_panel()
    # Subsequent draws/playback target the results canvas.
    self.WC = 2
    #self.getmeta1(None)
def on_set_button(self, event):
    """Open the parameter dialog and apply whatever it returns."""
    self.get_plot_type()
    dialog = ParamsDialog(self, -1, '', self.fsz, self.hop, self.tmin, self.tmax, self.plot_type)
    dialog.ShowModal()
    self.fsz, self.hop, self.tmin, self.tmax = dialog.return_params()
    self.draw_figure()
    dialog.Destroy()
def on_pick(self, event):
    """Show the data coordinates of a picked plot artist.

    event is a matplotlib.backend_bases.PickEvent; only the artist's
    bounding box is used here.
    """
    box_points = event.artist.get_bbox().get_points()
    msg = "You've clicked on a bar with coords:\n %s" % box_points
    dialog = wx.MessageDialog(self, msg, "Click!", wx.OK | wx.ICON_INFORMATION)
    dialog.ShowModal()
    dialog.Destroy()
def on_text_enter(self, event):
    """Any committed text edit triggers a redraw."""
    self.draw_figure()
def openfile(self, event):
    """Let the user pick a .wav file, load it, and draw it."""
    dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.wav", wx.OPEN)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        basename = os.path.basename(path)
        self.SetStatusText("You selected: %s" % basename)
        self.mypath = path
        # The loaded file becomes both the displayed and the query signal.
        self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
        self.rec_input = self.x
        self.WC = 1
        # Reset analysis parameters for the new file, then redraw.
        self.on_reset(self)
        self.draw_figure()
    dlg.Destroy()
def on_save_plot(self, event):
    """Prompt for a PNG path and save the current canvas figure there."""
    file_choices = "PNG (*.png)|*.png"
    dialog = wx.FileDialog(self,
                           message="Save plot as...",
                           defaultDir=os.getcwd(),
                           defaultFile="plot.png",
                           wildcard=file_choices,
                           style=wx.SAVE)
    if dialog.ShowModal() == wx.ID_OK:
        path = dialog.GetPath()
        self.canvas.print_figure(path, dpi=self.dpi)
        self.flash_status_message("Saved to %s" % path)
def on_play(self, event):
    """Play back whichever signal the active canvas shows."""
    if self.WC == 1:
        # Query/recording canvas.
        mir2.play(self.rec_input, self.fs)
    elif self.WC == 2:
        # Search-result canvas.
        mir2.play(self.x, self.fs)
def on_rec(self,event):
    """Record self.rectime seconds from the microphone, draw and replay it."""
    print 'Recording.'
    # NOTE(review): these two calls use `mir` while every other handler
    # uses `mir2` — confirm `mir` is imported and intended here.
    self.rec_input = mir.micread(self.rectime)
    self.WC = 1
    self.draw_figure()
    # NOTE(review): playback rate is hard-coded to 44100 rather than
    # self.fs — verify micread always records at 44.1 kHz.
    mir.play(self.rec_input, 44100)
def est_key(self, event):
    """Estimate the musical key of the current signal and show it."""
    self.statusbar.SetStatusText('Estimating Key...')
    keylist = ['C', 'C#','D','D#','E','F','F#','G','G#','A','A#','B']
    # mir2.Key returns a pitch-class index into keylist.
    keynum = mir2.Key(self.x, self.fs)
    self.keystr = keylist[keynum]
    self.statusbar.SetStatusText('The Key is: ' + self.keystr)
def on_exit(self, event):
    """Close the main frame."""
    self.Destroy()
def on_reset(self, event):
    """Restore default analysis parameters for the current signal and redraw."""
    self.fsz = 0.040
    self.hop = 0.020
    self.fmax = self.fs
    self.tmin = 0
    # Full signal duration, rounded to centiseconds.
    self.tmax = round(float(len(self.x)) / self.fs, 2)
    self.draw_figure()
def on_log_norm(self, event):
    """Toggle logarithmic color normalization and redraw."""
    if self.LG_flag == 1:
        self.LG_flag = 0
    elif self.LG_flag == 0:
        self.LG_flag = 1
    self.draw_figure()
def on_about(self, event):
    """Show the About box."""
    msg = """ Content-based musical search.\n Brennan Keegan, Steve Tjoa\n Signals and Information Group\n University of Maryland\n April 30, 2011 """
    about = wx.MessageDialog(self, msg, "About", wx.OK)
    about.ShowModal()
    about.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
    """Show msg in the status bar, clearing it after flash_len_ms ms."""
    self.statusbar.SetStatusText(msg)
    # One-shot timer; kept on self so it is not garbage collected
    # before it fires.
    self.timeroff = wx.Timer(self)
    self.Bind(
        wx.EVT_TIMER,
        self.on_flash_status_off,
        self.timeroff)
    self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
    """Timer callback: blank the status bar."""
    self.statusbar.SetStatusText('')
def get_plot_type(self):
    """Set self.plot_type to the index of the combo's current selection.

    Leaves plot_type untouched when the selection matches no entry.
    """
    selection = self.combo.GetStringSelection()
    for index, label in enumerate(self.plot_select):
        if label == selection:
            self.plot_type = index
def getmeta1(self, event):
if self.searchbutton1.GetLabel() == '':
return
self.draw_figure(0)
meta = self.metalist[0]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta2(self, event):
if self.searchbutton2.GetLabel() == '':
return
self.draw_figure(1)
meta = self.metalist[1]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta3(self, event):
if self.searchbutton3.GetLabel() == '':
return
self.draw_figure(2)
meta = self.metalist[2]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta4(self, event):
if self.searchbutton4.GetLabel() == '':
return
self.draw_figure(3)
meta = self.metalist[3]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta5(self, event):
if self.searchbutton5.GetLabel() == '':
return
self.draw_figure(4)
meta = self.metalist[4]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta6(self, event):
if self.searchbutton6.GetLabel() == '':
return
self.draw_figure(5)
meta = self.metalist[5]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta7(self, event):
if self.searchbutton7.GetLabel() == '':
return
self.draw_figure(6)
meta = self.metalist[6]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta8(self, event):
if self.searchbutton8.GetLabel() == '':
return
self.draw_figure(7)
meta = self.metalist[7]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta9(self, event):
if self.searchbutton9.GetLabel() == '':
return
self.draw_figure(8)
meta = self.metalist[8]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta10(self, event):
if self.searchbutton10.GetLabel() == '':
return
self.draw_figure(9)
meta = self.metalist[9]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
class ParamsDialog(wx.Dialog):
    """Modal dialog for editing plot parameters.

    For plot_type 0 (waveform) only the time window is editable; for the
    spectrogram / constant-Q / chroma plots (1/2/3) the frame size, hop
    size and a frequency/pitch range are shown as well.  The three
    "-gram" layouts in the original were copy/paste-identical except for
    the min/max field defaults and labels, so they are now built by one
    parameterized helper.

    NOTE(review): return_params() is read by the caller even when the
    dialog is cancelled, in which case tmin/tmax are still the str()
    values stored in __init__ — confirm callers check the ShowModal()
    result before relying on the types.
    """

    # plot_type -> (fmin default, fmax default, min label, max label)
    _RANGE_FIELDS = {
        1: ('0.00', '44100', "Min Freq. (Hz): ", "Max Freq. (Hz): "),
        2: ('0', '136', "Min Pitch (MIDI): ", "Max Pitch (MIDI): "),
        3: ('C', 'B', "Min Pitch (Note): ", "Max Pitch (Note): "),
    }

    def __init__(self, parent, id, title, fsz, hop, tmin, tmax, plot_type):
        wx.Dialog.__init__(self, parent, id, title)
        # Current values are held as strings for the text controls.
        self.fsz, self.hop, self.tmin, self.tmax, self.plot_type = \
            str(fsz), str(hop), str(tmin), str(tmax), plot_type
        if self.plot_type == 0:
            self._build_time_form()
        elif self.plot_type in self._RANGE_FIELDS:
            self._build_gram_form(*self._RANGE_FIELDS[self.plot_type])

    def _make_field(self, label_text, value):
        """Create one (StaticText, TextCtrl) pair for a parameter."""
        label = wx.StaticText(self, -1, label_text)
        box = wx.TextCtrl(self, -1, value, style=wx.TE_PROCESS_ENTER)
        return label, box

    def _make_row(self, label, box, gap):
        """Lay one label/control pair out in a horizontal sizer.

        gap is the spacer width between label and control (0 for none);
        the values replicate the original hand-tuned alignment.
        """
        row = wx.BoxSizer(wx.HORIZONTAL)
        row.AddSpacer(80)
        row.Add(label, 1, wx.ALIGN_CENTER | wx.TOP)
        if gap:
            row.AddSpacer(gap)
        row.Add(box, 1, wx.ALIGN_CENTER | wx.TOP)
        return row

    def _finish(self, rows, row_gap):
        """Stack the rows plus OK/Cancel buttons and wire the OK handler."""
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.AddSpacer(10)
        for i, row in enumerate(rows):
            if i and row_gap:
                vbox.AddSpacer(row_gap)
            vbox.Add(row, 1)
        vbox.AddSpacer(15)
        vbox.Add(self.CreateButtonSizer(wx.CANCEL | wx.OK), 0, wx.ALIGN_CENTER)
        vbox.AddSpacer(20)
        self.SetSizer(vbox)
        self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)

    def _build_time_form(self):
        """Waveform dialog: start/end time only."""
        self.tmin_label, self.tmin_box = self._make_field("Start Time (sec): ", self.tmin)
        self.tmax_label, self.tmax_box = self._make_field("End Time (sec): ", self.tmax)
        rows = [self._make_row(self.tmin_label, self.tmin_box, 3),
                self._make_row(self.tmax_label, self.tmax_box, 9)]
        self._finish(rows, 0)

    def _build_gram_form(self, fmin, fmax, fmin_text, fmax_text):
        """Spectrogram-family dialog: frame/hop, time window, freq range."""
        self.fmin, self.fmax = fmin, fmax
        self.fsz_label, self.fsz_box = self._make_field("Frame Size (sec): ", self.fsz)
        self.hop_label, self.hop_box = self._make_field("Hop Size (sec): ", self.hop)
        self.tmin_label, self.tmin_box = self._make_field("Start Time (sec): ", self.tmin)
        self.tmax_label, self.tmax_box = self._make_field("End Time (sec): ", self.tmax)
        self.fmin_label, self.fmin_box = self._make_field(fmin_text, self.fmin)
        self.fmax_label, self.fmax_box = self._make_field(fmax_text, self.fmax)
        rows = [self._make_row(self.fsz_label, self.fsz_box, 0),
                self._make_row(self.hop_label, self.hop_box, 13),
                self._make_row(self.tmin_label, self.tmin_box, 3),
                self._make_row(self.tmax_label, self.tmax_box, 9),
                self._make_row(self.fmin_label, self.fmin_box, 13),
                self._make_row(self.fmax_label, self.fmax_box, 9)]
        self._finish(rows, 10)

    def OnOK(self, event):
        """OK handler: pull the committed values back out of the controls."""
        if self.plot_type != 0:
            self.fsz = float(self.fsz_box.GetValue())
            self.hop = float(self.hop_box.GetValue())
        else:
            # BUGFIX: the waveform dialog has no fsz/hop controls, and the
            # original left self.fsz/self.hop as the str() values set in
            # __init__, so return_params() leaked strings to the caller.
            self.fsz = float(self.fsz)
            self.hop = float(self.hop)
        self.tmin = float(self.tmin_box.GetValue())
        self.tmax = float(self.tmax_box.GetValue())
        self.Close()

    def return_params(self):
        """Return (fsz, hop, tmin, tmax) as last committed by OnOK."""
        return self.fsz, self.hop, self.tmin, self.tmax
if __name__ == '__main__':
    # Launch the GUI: build the main frame, maximize it, and enter the
    # wx event loop.  (wx.PySimpleApp is the legacy wxPython bootstrap.)
    app = wx.PySimpleApp()
    app.frame = mirgui()
    app.frame.Show()
    app.frame.Maximize()
    app.MainLoop()
| mit |
acshu/transit-gui | lib/Layout.py | 1 | 51186 | # -*- coding: utf-8 -*-
from ConfigParser import ConfigParser
from ast import literal_eval
from copy import copy
from genericpath import exists
from math import atan, degrees, sin, sqrt, log10
import operator
import os
import sys
import csv
from PyQt4.QtCore import Qt, pyqtSignal, QString, QAbstractTableModel, QVariant, QEvent
from PyQt4.QtGui import QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QProgressBar, QGridLayout, QLabel, QCheckBox, QFileDialog, QMessageBox, QTabWidget, QLineEdit, QPalette, QSizePolicy, QColor, QTableWidget, QAbstractItemView, QMenu, QTableWidgetItem, QTableView, QAction
import math
import gc
from matplotlib import rcParams
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from lib.Structures import Global, TaskRange
from lib.Utils import Constants, TaskImporter, flip_phase_list, uc_variable_name
from lib.FormParams import *
class Layout(QWidget):
    """Top-level widget: the input form on the left, results on the right."""

    def __init__(self):
        super(Layout, self).__init__()
        self.setObjectName('Layout')
        self.setLayout(QHBoxLayout())
        self.form = InputForm()
        self.result = ResultView()
        self.layout().addWidget(self.form)
        self.layout().addWidget(self.result)
class InputForm(QWidget):
    """Left-hand input panel; the first constructed instance acts as a
    process-wide singleton accessible via InputForm.instance()."""
    # Singleton storage; assigned by the first __init__ call.
    __instance = None
    @staticmethod
    def instance():
        """Return the singleton InputForm (None before construction)."""
        return InputForm.__instance
def __init__(self):
    """Build the parameter/observation tabs and the progress/button strip."""
    # Register the first instance as the singleton for instance().
    if not InputForm.__instance:
        InputForm.__instance = self
    super(InputForm, self).__init__()
    # Wire task life-cycle signals to the UI handlers below.
    Global.event.task_started.connect(self._on_task_started)
    Global.event.task_completed.connect(self._on_task_completed)
    Global.event.task_progressed.connect(self._on_task_progressed)
    Global.event.task_range_progressed.connect(self._on_task_range_progressed)
    Global.event.interface_load_task_params.connect(self._on_interface_load_task_params)
    self.vl = QVBoxLayout()
    self.vl.setContentsMargins(0,0,0,0)
    self.setLayout(self.vl)
    self.setFixedWidth(290)
    # Two tabs: model parameters and observation-import settings.
    self.tab = QTabWidget()
    self.vl.addWidget(self.tab)
    self.input_parameters = InputParameters()
    self.input_parameters.ranges_state_changed.connect(self._on_ranges_state_changed)
    self.tab.addTab(self.input_parameters, 'Parameters')
    self.import_parameters = ImportParameters()
    self.tab.addTab(self.import_parameters, 'Observation')
    # Bottom strip: two progress bars plus the Calculate/Cancel buttons.
    control_buttons = QWidget()
    control_buttons.setLayout(QVBoxLayout())
    control_buttons.layout().setContentsMargins(0, 0, 0, 0)
    control_buttons.layout().setAlignment(Qt.AlignBottom)
    # Per-task progress (hidden while idle).
    self._progress = QProgressBar()
    self._progress.setValue(0)
    self._progress.setTextVisible(True)
    self._progress.setAlignment(Qt.AlignCenter)
    self._progress.hide()
    control_buttons.layout().addWidget(self._progress)
    # Whole-sweep progress over all parameter combinations.
    self._range_progress = QProgressBar()
    self._range_progress.setValue(0)
    self._range_progress.setTextVisible(True)
    self._range_progress.setAlignment(Qt.AlignCenter)
    self._range_progress.hide()
    control_buttons.layout().addWidget(self._range_progress)
    self._calculate = QPushButton('Calculate')
    self._calculate.clicked.connect(self._on_calculate)
    control_buttons.layout().addWidget(self._calculate)
    self._cancel = QPushButton('Cancel')
    self._cancel.hide()
    self._cancel.clicked.connect(self._on_cancel)
    control_buttons.layout().addWidget(self._cancel)
    self.vl.addWidget(control_buttons)
    # Restore the previous session's parameters if available.
    if exists("./config/last-session.ini") :
        self.load_params("./config/last-session.ini")
def _on_ranges_state_changed(self, parameters):
    """Rebuild the global task range from the active range parameters
    and show the variation count on the Calculate button.

    parameters: dict mapping input-parameter name -> parameter object
    whose .range.values holds the values to sweep.
    (FIX: dropped an unused `keys = parameters.keys()` local.)
    """
    Global.task_range().reset()
    if len(parameters):
        for key in parameters:
            # Copy so later edits to the form do not mutate the queued range.
            Global.task_range().set_range(key, copy(parameters[key].range.values))
        # NOTE(review): reaches into the private _total_count attribute;
        # confirm total_count() would return the same value here.
        self._calculate.setText('Calculate ' + str(Global.task_range()._total_count) + ' variations')
    else:
        self._calculate.setText('Calculate')
def _on_calculate(self):
    """Copy the form values into the global task input and start it.

    When range sweeps are active, the next untried combination of range
    values overrides the corresponding scalar fields.
    """
    self.import_parameters.import_observation()
    combination = Global.task_range().get_next_combination()
    Global.task().input.semi_major_axis = self.input_parameters.semi_major_axis.getValue()
    Global.task().input.star_radius = self.input_parameters.star_radius.getValue()
    Global.task().input.planet_radius = self.input_parameters.planet_radius.getValue()
    Global.task().input.star_temperature = self.input_parameters.star_temperature.getValue()
    Global.task().input.planet_temperature = self.input_parameters.planet_temperature.getValue()
    # The combo stores the darkening-law identifier in its item data.
    Global.task().input.darkening_law = self.input_parameters.darkening_law.value.itemData(self.input_parameters.darkening_law.value.currentIndex()).toString()
    Global.task().input.darkening_coefficient_1 = self.input_parameters.darkening_coefficient_1.getValue()
    Global.task().input.darkening_coefficient_2 = self.input_parameters.darkening_coefficient_2.getValue()
    Global.task().input.inclination = self.input_parameters.inclination.getValue()
    # Phase always starts at 0; only the end/step are user-editable.
    Global.task().input.phase_start = 0
    Global.task().input.phase_end = self.input_parameters.phase_end.getValue()
    Global.task().input.phase_step = self.input_parameters.phase_step.getValue()
    # The form exposes the precision exponent; the task wants 10**x.
    Global.task().input.precision = 10**self.input_parameters.integration_precision.getValue()
    if combination:
        # combination is a list of (parameter_name, value) pairs.
        for param in combination:
            setattr(Global.task().input, param[0], param[1])
    Global.task().start()
def _on_task_range_progressed(self, progress):
    """Advance the sweep progress bar, then launch the next combination."""
    done = Global.task_range().completed_count()
    total = Global.task_range().total_count()
    self._range_progress.setFormat(str(done) + ' of ' + str(total))
    self._range_progress.setValue(math.ceil((float(done) / total) * 100))
    self._on_calculate()
def _on_task_started(self, task):
    """Switch the panel into its 'running' state."""
    self._calculate.hide()
    self._cancel.show()
    self._progress.setValue(0)
    self._progress.show()
    self.tab.setDisabled(True)
    # During a range sweep, also show the sweep-level progress bar.
    if Global.task_range().total_count():
        self._range_progress.show()
        if Global.task_range().completed_count() == 0:
            self._range_progress.setFormat('0 of ' + str(Global.task_range().total_count()))
            self._range_progress.setValue(0)
def _on_task_progressed(self, task, progress):
    """Forward per-task progress (0-100) to the main progress bar."""
    self._progress.setValue(progress)
def _on_task_completed(self, task):
    """Return to the idle state once the whole range sweep is done."""
    # Still mid-sweep: _on_task_range_progressed will chain the next run.
    if Global.task_range().total_count() and Global.task_range().completed_count():
        return
    self._calculate.show()
    self._cancel.hide()
    self.tab.setDisabled(False)
    for bar in (self._progress, self._range_progress):
        bar.hide()
        bar.setValue(0)
def _on_cancel(self):
    """Abort the running task and restore the idle UI."""
    Global.task().stop()
    self._calculate.show()
    self._cancel.hide()
    for bar in (self._progress, self._range_progress):
        bar.hide()
        bar.setValue(0)
    self.tab.setDisabled(False)
def _on_interface_load_task_params(self, task):
    """Push a previously saved task's input values back into the form."""
    self.input_parameters.semi_major_axis.value.setValue(task.input.semi_major_axis)
    self.input_parameters.star_radius.value.setValue(task.input.star_radius)
    self.input_parameters.planet_radius.value.setValue(task.input.planet_radius)
    self.input_parameters.star_temperature.value.setValue(task.input.star_temperature)
    self.input_parameters.planet_temperature.value.setValue(task.input.planet_temperature)
    self.input_parameters.inclination.value.setValue(task.input.inclination)
    # Find the combo index whose identifier matches the stored law.
    darkening_law_index = 0
    for item in DarkeningLaw.items:
        if item[1] == task.input.darkening_law:
            break
        darkening_law_index += 1
    self.input_parameters.darkening_law.value.setCurrentIndex(darkening_law_index)
    self.input_parameters.darkening_coefficient_1.value.setValue(task.input.darkening_coefficient_1)
    self.input_parameters.darkening_coefficient_2.value.setValue(task.input.darkening_coefficient_2)
    self.input_parameters.phase_end.value.setValue(task.input.phase_end)
    self.input_parameters.phase_step.value.setValue(task.input.phase_step)
    # precision is stored as 10**x; the form edits the exponent.
    self.input_parameters.integration_precision.value.setValue(log10(task.input.precision))
    # Deactivate all range sweeps: a loaded task holds scalar values only.
    for parameter_name in copy(self.input_parameters.range_parameters):
        parameter = getattr(self.input_parameters, parameter_name)
        if parameter.range:
            parameter.range.set_active(False)
    self.repaint()
def load_params(self, filename):
    """Populate the whole form (input + import tabs) from an INI file."""
    config = ConfigParser()
    config.read(filename)
    # Migrate legacy option names before reading anything.
    self._normalize_config(config)
    # Input Parameters
    self._load_config_param(config, 'input', 'semi_major_axis')
    self._load_config_param(config, 'input', 'star_radius')
    self._load_config_param(config, 'input', 'planet_radius')
    self._load_config_param(config, 'input', 'star_temperature')
    self._load_config_param(config, 'input', 'planet_temperature')
    self._load_config_param(config, 'input', 'inclination')
    self._load_config_param(config, 'input', 'darkening_law')
    self._load_config_param(config, 'input', 'darkening_coefficient_1')
    self._load_config_param(config, 'input', 'darkening_coefficient_2')
    self._load_config_param(config, 'input', 'phase_end')
    self._load_config_param(config, 'input', 'phase_step')
    self._load_config_param(config, 'input', 'integration_precision')
    # Import Parameters
    if config.has_option('import', 'filename') and config.get('import', 'filename'):
        # Paths stored relative to ./data/ are resolved against the cwd.
        if '/data/' in config.get('import', 'filename') and config.get('import', 'filename').index('/data/') == 0:
            self.import_parameters.filename = os.getcwd().replace('\\', '/') + config.get('import', 'filename')
        else:
            self.import_parameters.filename = config.get('import', 'filename')
        self.import_parameters.update_file_label()
    # HJD -> phase conversion settings.
    if config.has_option('import', 'jd2phase') and config.getboolean('import', 'jd2phase') == True :
        self.import_parameters.hjd_to_phases.setCheckState(Qt.Checked)
    if config.has_option('import', 'jd2phase_tzero') :
        self.import_parameters.time_zero.setValue(config.getfloat('import', 'jd2phase_tzero'))
    if config.has_option('import', 'jd2phase_period') :
        self.import_parameters.period.setValue(config.getfloat('import', 'jd2phase_period'))
    # Magnitude -> flux conversion settings.
    if config.has_option('import', 'mag2flux') and config.getboolean('import', 'mag2flux') == True :
        self.import_parameters.magnitude_to_flux.setCheckState(Qt.Checked)
    if config.has_option('import', 'mag2flux_mag') :
        self.import_parameters.magnitude_max.setValue(config.getfloat('import', 'mag2flux_mag'))
    # Fixes painting bug with range buttons when loading new file
    # the active ranges stayed active even if they are inactive
    self.repaint()
def _normalize_config(self, config):
    """Migrate legacy option names in *config* to their current equivalents.

    Older saved sessions used short option names in the ``input`` section;
    each is copied to its new name and the old option removed.
    """
    renames = (('darkening_1', 'darkening_coefficient_1'),
               ('darkening_2', 'darkening_coefficient_2'),
               ('precision', 'integration_precision'))
    for legacy, current in renames:
        if config.has_option('input', legacy):
            config.set('input', current, config.get('input', legacy))
            config.remove_option('input', legacy)
def _load_config_param(self, config, section, name):
    """Restore one input-parameter widget (and its optional range) from *config*.

    Combo-box parameters store their current index; spin-box parameters store
    a Python literal.  Range settings are read from the ``<name>_range_*``
    options of the same section.
    """
    param = getattr(self.input_parameters, name)
    if config.has_option(section, name):
        if type(param.value) is QComboBox:
            param.value.setCurrentIndex(config.getint(section, name))
        else:
            param.value.setValue(literal_eval(config.get(section, name)))
    if param.range:
        _from = _to = _step = _values = None
        _active = False
        if config.has_option(section, name + '_range_from'):
            _from = literal_eval(config.get(section, name + '_range_from'))
        if config.has_option(section, name + '_range_to'):
            _to = literal_eval(config.get(section, name + '_range_to'))
        if config.has_option(section, name + '_range_step'):
            _step = literal_eval(config.get(section, name + '_range_step'))
        if config.has_option(section, name + '_range_values'):
            _values = literal_eval(config.get(section, name + '_range_values'))
        if config.has_option(section, name + '_range_active'):
            _active = config.getboolean(section, name + '_range_active')
        if _values:
            param.range.set_range(_values)
        # Compare bounds against None (not truthiness) so a legitimate 0 / 0.0
        # bound is not silently discarded; a zero step is still rejected
        # because stepping by 0 could never terminate.
        elif _from is not None and _to is not None and _step:
            param.range.set_range(_from, _to, _step)
        param.range.set_active(_active)
def _save_config_param(self, config, section, name):
    """Persist one input-parameter widget (and its optional range) to *config*.

    Mirrors ``_load_config_param``: combo boxes save their index, spin boxes
    their value; range settings go to the ``<name>_range_*`` options.
    """
    param = getattr(self.input_parameters, name)
    if type(param.value) is QComboBox:
        config.set(section, name, param.value.currentIndex())
    else:
        config.set(section, name, param.getValue())
    if param.range:
        # Compare bounds against None so 0 / 0.0 bounds are still saved;
        # a zero step remains excluded as meaningless.
        if param.range.range_type == RangeButton.TYPE_STEP and \
                param.range.range_from is not None and \
                param.range.range_to is not None and param.range.range_step:
            config.set(section, name + '_range_from', param.range.range_from)
            config.set(section, name + '_range_to', param.range.range_to)
            config.set(section, name + '_range_step', param.range.range_step)
        elif param.range.range_type == RangeButton.TYPE_VALUES and param.range.values:
            config.set(section, name + '_range_values', param.range.values)
        # Only an active range is recorded; absence implies inactive on load.
        if param.range.is_active():
            config.set(section, name + '_range_active', param.range.is_active())
def save_params(self, filename):
    """Serialize all input and import parameters to *filename* (INI format)."""
    config = ConfigParser()
    config.add_section('input')
    # Input Parameters
    self._save_config_param(config, 'input', 'semi_major_axis')
    self._save_config_param(config, 'input', 'star_radius')
    self._save_config_param(config, 'input', 'planet_radius')
    self._save_config_param(config, 'input', 'star_temperature')
    self._save_config_param(config, 'input', 'planet_temperature')
    self._save_config_param(config, 'input', 'inclination')
    self._save_config_param(config, 'input', 'darkening_law')
    self._save_config_param(config, 'input', 'darkening_coefficient_1')
    self._save_config_param(config, 'input', 'darkening_coefficient_2')
    self._save_config_param(config, 'input', 'phase_end')
    self._save_config_param(config, 'input', 'phase_step')
    self._save_config_param(config, 'input', 'integration_precision')
    config.add_section('import')
    # Strip the current working directory prefix from the data-file path so
    # saved sessions below the CWD stay portable across machines.
    if os.getcwd().replace('\\', '/') in str(self.import_parameters.filename) and str(self.import_parameters.filename).index(os.getcwd().replace('\\', '/')) == 0 :
        save_file_path = str(self.import_parameters.filename).replace(os.getcwd().replace('\\', '/'), '')
    else:
        save_file_path = str(self.import_parameters.filename)
    config.set('import', 'filename', save_file_path)
    config.set('import', 'jd2phase', self.import_parameters.hjd_to_phases.checkState() == Qt.Checked)
    config.set('import', 'jd2phase_tzero', self.import_parameters.time_zero.value())
    config.set('import', 'jd2phase_period', self.import_parameters.period.value())
    config.set('import', 'mag2flux', self.import_parameters.magnitude_to_flux.checkState() == Qt.Checked)
    config.set('import', 'mag2flux_mag', self.import_parameters.magnitude_max.value())
    # 'wb' + ConfigParser.write is the Python 2 convention (str == bytes).
    with open(filename, 'wb') as configfile:
        config.write(configfile)
    pass
class InputParameters(QWidget):
    """Grid of model input parameters with optional per-parameter ranges.

    Each parameter is a label / range-button / value / unit "triplet" laid out
    on one grid row.  Range buttons open per-parameter range dialogs; the set
    of parameters whose range is active is tracked in ``range_parameters`` and
    broadcast through ``ranges_state_changed``.
    """

    # Emitted with {name: triplet} for every parameter whose range is active.
    ranges_state_changed = pyqtSignal(dict)

    def __init__(self):
        QWidget.__init__(self)
        self.range_parameters = dict()
        self.grid = QGridLayout()
        self.grid.setAlignment(Qt.AlignTop)
        self.grid.setColumnStretch(2, 2)
        self.setLayout(self.grid)
        # Semi-major axis
        self.semi_major_axis = self.add_triplet(SemiMajorAxis(), 1)
        self.semi_major_axis.range.clicked.connect(lambda: self._on_range_clicked('semi_major_axis'))
        self.semi_major_axis.range.state_changed.connect(self.semi_major_axis.value.setDisabled)
        self.semi_major_axis.range.state_changed.connect(lambda: self._on_range_changed('semi_major_axis'))
        # Star radius -- shown twice (AU and solar radii); the two spin boxes
        # are kept in sync via the valueChanged handlers below.
        self.star_radius = self.add_triplet(StarRadiusAU(), 2)
        self.star_radius_rs = self.add_triplet(StarRadiusRS(), 3)
        self.star_radius.range.clicked.connect(lambda: self._on_range_clicked('star_radius'))
        self.star_radius.range.state_changed.connect(self.star_radius.value.setDisabled)
        self.star_radius.range.state_changed.connect(self.star_radius_rs.value.setDisabled)
        self.star_radius.range.state_changed.connect(lambda: self._on_range_changed('star_radius'))
        self.star_radius.value.valueChanged.connect(self._on_star_radius_change)
        self.star_radius_rs.value.valueChanged.connect(self._on_star_radius_rs_change)
        # Planet radius -- shown twice (AU and Jupiter radii), same mechanism.
        self.planet_radius = self.add_triplet(PlanetRadiusAU(), 4)
        self.planet_radius_rj = self.add_triplet(PlanetRadiusRJ(), 5)
        self.planet_radius.range.clicked.connect(lambda: self._on_range_clicked('planet_radius'))
        self.planet_radius.range.state_changed.connect(self.planet_radius.value.setDisabled)
        self.planet_radius.range.state_changed.connect(self.planet_radius_rj.value.setDisabled)
        self.planet_radius.range.state_changed.connect(lambda: self._on_range_changed('planet_radius'))
        self.planet_radius.value.valueChanged.connect(self._on_planet_radius_change)
        self.planet_radius_rj.value.valueChanged.connect(self._on_planet_radius_rj_change)
        # Star temperature
        self.star_temperature = self.add_triplet(StarTemperature(), 6)
        self.star_temperature.range.clicked.connect(lambda: self._on_range_clicked('star_temperature'))
        self.star_temperature.range.state_changed.connect(self.star_temperature.value.setDisabled)
        self.star_temperature.range.state_changed.connect(lambda: self._on_range_changed('star_temperature'))
        # Planet temperature
        self.planet_temperature = self.add_triplet(PlanetTemperature(), 7)
        self.planet_temperature.range.clicked.connect(lambda: self._on_range_clicked('planet_temperature'))
        self.planet_temperature.range.state_changed.connect(self.planet_temperature.value.setDisabled)
        self.planet_temperature.range.state_changed.connect(lambda: self._on_range_changed('planet_temperature'))
        # Inclination
        self.inclination = self.add_triplet(Inclination(), 8)
        self.inclination.range.clicked.connect(lambda: self._on_range_clicked('inclination'))
        self.inclination.range.state_changed.connect(self.inclination.value.setDisabled)
        self.inclination.range.state_changed.connect(lambda: self._on_range_changed('inclination'))
        # Darkening law
        self.darkening_law = self.add_triplet(DarkeningLaw(), 9)
        self.darkening_law.range.clicked.connect(lambda: self._on_range_clicked('darkening_law'))
        self.darkening_law.range.state_changed.connect(self.darkening_law.value.setDisabled)
        self.darkening_law.range.state_changed.connect(lambda: self._on_range_changed('darkening_law'))
        # Darkening coefficients
        self.darkening_coefficient_1 = self.add_triplet(DarkeningCoefficient('Dark. coefficient 1:', ''), 10)
        self.darkening_coefficient_1.range.clicked.connect(lambda: self._on_range_clicked('darkening_coefficient_1'))
        self.darkening_coefficient_1.range.state_changed.connect(self.darkening_coefficient_1.value.setDisabled)
        self.darkening_coefficient_1.range.state_changed.connect(lambda: self._on_range_changed('darkening_coefficient_1'))
        self.darkening_coefficient_2 = self.add_triplet(DarkeningCoefficient('Dark. coefficient 2:', ''), 11)
        self.darkening_coefficient_2.range.clicked.connect(lambda: self._on_range_clicked('darkening_coefficient_2'))
        self.darkening_coefficient_2.range.state_changed.connect(self.darkening_coefficient_2.value.setDisabled)
        self.darkening_coefficient_2.range.state_changed.connect(lambda: self._on_range_changed('darkening_coefficient_2'))
        # Phase end
        self.phase_end = self.add_triplet(PhaseEnd(), 12)
        # Phase step
        self.phase_step = self.add_triplet(PhaseStep(), 13)
        # integration precision
        self.integration_precision = self.add_triplet(IntegrationPrecision(), 14)

    def _on_star_radius_change(self, value):
        # Mirror the AU value into the R_sun box without re-triggering signals.
        self.star_radius_rs.value.blockSignals(True)
        self.star_radius_rs.value.setValue(Constants.au_to_rs(value))
        self.star_radius_rs.value.blockSignals(False)

    def _on_star_radius_rs_change(self, value):
        # Mirror the R_sun value back into the AU box.
        self.star_radius.value.blockSignals(True)
        self.star_radius.value.setValue(Constants.rs_to_au(value))
        self.star_radius.value.blockSignals(False)

    def _on_planet_radius_change(self, value):
        # Mirror the AU value into the R_jup box without re-triggering signals.
        self.planet_radius_rj.value.blockSignals(True)
        self.planet_radius_rj.value.setValue(Constants.au_to_rj(value))
        self.planet_radius_rj.value.blockSignals(False)

    def _on_planet_radius_rj_change(self, value):
        # Mirror the R_jup value back into the AU box.
        self.planet_radius.value.blockSignals(True)
        self.planet_radius.value.setValue(Constants.rj_to_au(value))
        self.planet_radius.value.blockSignals(False)

    def _on_range_clicked(self, name):
        # Toggle behaviour: an inactive range opens its configuration dialog;
        # an active range is switched off.  The dialog class is resolved
        # dynamically from the parameter name, e.g. 'star_radius' ->
        # StarRadiusRangeDialog in this module.
        if not getattr(self, name).range.is_active():
            if getattr(self, name) == self.darkening_law:
                dialog = getattr(sys.modules[__name__], uc_variable_name(name) + 'RangeDialog')(getattr(self, name).range.values)
            else:
                dialog = getattr(sys.modules[__name__], uc_variable_name(name) + 'RangeDialog')(getattr(self, name).range.range_from, getattr(self, name).range.range_to, getattr(self, name).range.range_step)
            dialog.accepted.connect(lambda: self._on_range_accepted(name))
            dialog.rejected.connect(lambda: self._on_range_rejected(name))
            dialog.display()
        else:
            self._on_range_rejected(name)
        pass

    def _on_range_accepted(self, name):
        # Pull the values back out of the dialog (the signal sender) and
        # activate the range.  The darkening law uses a value list; all other
        # parameters use from/to/step spin boxes named '<name>_from' etc.
        if getattr(self, name) == self.darkening_law:
            getattr(self, name).range.set_range(self.sender().values())
        else:
            getattr(self, name).range.set_range(getattr(self.sender(), name + '_from').getValue(),
                                                getattr(self.sender(), name + '_to').getValue(),
                                                getattr(self.sender(), name + '_step').getValue())
        getattr(self, name).range.set_active(True)

    def _on_range_rejected(self, name):
        getattr(self, name).range.set_active(False)
        # NOTE(review): only the R_jup twin box is re-enabled here; the
        # star-radius R_sun twin is handled via state_changed -- confirm.
        if name == 'planet_radius':
            self.planet_radius_rj.value.setDisabled(False)

    def _on_range_changed(self, name):
        # Keep range_parameters in sync with the active ranges and broadcast.
        # (dict.has_key is the Python 2 membership test.)
        if getattr(self, name).range.is_active():
            self.range_parameters[name] = getattr(self, name)
        elif self.range_parameters.has_key(name):
            del self.range_parameters[name]
        self.ranges_state_changed.emit(self.range_parameters)

    def add_triplet(self, triplet, position):
        # Lay out label | range button | value | unit on one grid row.
        self.grid.addWidget(triplet.label, position, 0)
        self.grid.addWidget(triplet.range, position, 1)
        self.grid.addWidget(triplet.value, position, 2)
        self.grid.addWidget(triplet.unit, position, 3)
        return triplet
class ImportParameters(QWidget):
    """Form for loading an observation file and applying unit conversions.

    Handles file selection, optional HJD-to-phase conversion and optional
    magnitude-to-flux conversion, then publishes the imported series via
    ``Global.event.data_imported``.
    """

    def __init__(self):
        QWidget.__init__(self)
        self.filename = ''          # currently selected data file ('' = none)
        self.import_phases = []     # last imported phase series
        self.import_values = []     # last imported flux series
        grid = QGridLayout()
        grid.setAlignment(Qt.AlignTop)
        grid.setColumnStretch(1,1)
        self.setLayout(grid)
        self.filename_label = QLabel('No file selected')
        self.file_browse = QPushButton('Browse...')
        self.file_browse.setFixedWidth(85)
        self.file_browse.clicked.connect(self._on_file_browse)
        self.file_clear = QPushButton('Clear')
        self.file_clear.setFixedWidth(85)
        self.file_clear.clicked.connect(self._on_file_clear)
        self.file_clear.setHidden(True)
        grid.addWidget(self.filename_label, 1, 0, 1, 0)
        # Browse and Clear share a cell; only one is visible at a time.
        grid.addWidget(self.file_browse, 1, 3)
        grid.addWidget(self.file_clear, 1, 3)
        self.hjd_to_phases = QCheckBox('Convert HJD to phases')
        self.hjd_to_phases.stateChanged.connect(self._on_hjd_state_changed)
        grid.addWidget(self.hjd_to_phases, 2, 0, 1, 0)
        self.time_zero_label = QLabel('T<sub>0</sub>')
        self.time_zero_label.setFixedWidth(20)
        self.time_zero = CustomDoubleSpinBox()
        self.time_zero.setSingleStep(0.01)
        self.time_zero.setDecimals(10)
        self.time_zero.setAccelerated(True)
        self.time_zero.setDisabled(True)
        self.time_zero.setMinimum(0)
        self.time_zero.setFixedWidth(200)
        self.time_zero.setRange(0, sys.float_info.max)
        grid.addWidget(self.time_zero_label, 3, 0)
        grid.addWidget(self.time_zero, 3, 1)
        self.period_label = QLabel('P')
        self.period_label.setFixedWidth(20)
        self.period = CustomDoubleSpinBox()
        self.period.setFixedWidth(200)
        self.period.setDisabled(True)
        self.period.setRange(0, sys.float_info.max)
        self.period.setDecimals(10)
        grid.addWidget(self.period_label, 4, 0)
        grid.addWidget(self.period, 4, 1)
        self.magnitude_to_flux = QCheckBox('Convert magnitude to flux')
        self.magnitude_to_flux.stateChanged.connect(self._on_magnitude_state_changed)
        grid.addWidget(self.magnitude_to_flux, 5, 0, 1, 0)
        self.magnitude_max_label = QLabel('Mag')
        self.magnitude_max = CustomDoubleSpinBox()
        self.magnitude_max.setSingleStep(0.01)
        self.magnitude_max.setDecimals(10)
        self.magnitude_max.setAccelerated(True)
        self.magnitude_max.setDisabled(True)
        self.magnitude_max.setMinimum(0)
        self.magnitude_max.setFixedWidth(105)
        grid.addWidget(self.magnitude_max_label, 6, 0)
        grid.addWidget(self.magnitude_max, 6, 1)
        self.redraw = QPushButton("Redraw")
        self.redraw.clicked.connect(self._on_redraw)
        grid.addWidget(self.redraw, 6,3)

    def _on_file_browse(self):
        # Start the dialog in the directory of the previously chosen file.
        # NOTE(review): self.filename is initialised to '' and reset to '',
        # never None, so the None branch looks unreachable -- confirm.
        directory = "" if self.filename is None else QString(str("/").join(str(self.filename).split("/")[:-1]))
        types = TaskImporter.get_formats()
        filters = []
        for value in types :
            filters.append(value.upper() + " (*." + value + ")")
        filters.append("All files (*.*)")
        self.filename = QFileDialog.getOpenFileName(self, 'Open file', directory=directory, filter=";;".join(filters))
        self.update_file_label()

    def _on_file_clear(self):
        self.filename = ''
        self.update_file_label()

    def update_file_label(self):
        # Show the file's basename and toggle Clear/Redraw availability.
        if self.filename :
            self.filename_label.setText(self.filename.split("/")[-1])
            self.file_clear.setHidden(False)
            self.redraw.setDisabled(False)
        else:
            self.filename_label.setText('No file selected')
            self.file_clear.setHidden(True)
            self.redraw.setDisabled(True)
        pass

    def import_observation(self):
        """Load the selected file, apply conversions and emit data_imported."""
        if not self.filename :
            return
        try:
            phases, values = TaskImporter.load_file(self.filename)
            # convert JD time to phases: phase = ((t - T0) / P) mod 1,
            # after validating that both T0 and P are positive.
            if self.hjd_to_phases.checkState() == Qt.Checked:
                if self.time_zero.value() <= 0 :
                    QMessageBox.warning(self, "Error", 'Invalid parameter "T<sub>0</sub>"!')
                    return
                if self.period.value() <= 0 :
                    QMessageBox.warning(self, "Error", 'Invalid parameter "P"!')
                    return
                for (index, phase) in enumerate(phases):
                    phases[index] = (phase - self.time_zero.value()) / self.period.value() % 1
            # convert magnitude to flux relative to the reference magnitude
            if self.magnitude_to_flux.checkState() == Qt.Checked:
                for (index, value) in enumerate(values):
                    values[index] = 10**(-(value - self.magnitude_max.value())/2.5)
            phases = flip_phase_list(phases)
            # TODO Detrending
            #slope = (values[8] - values[-8])/(phases[8] - phases[-8])
            #angle = atan(slope)
            #
            #for index, value in enumerate(values):
            #    hyp = sqrt(abs((phases[-8] - phases[index])**2 - (values[-8] - values[index])**2))
            #    print hyp
            #    values[index] += hyp * sin(angle)
            self.import_phases = phases
            self.import_values = values
            Global.event.data_imported.emit(self.import_phases, self.import_values)
        except:
            # Surface the failure to the user, then re-raise for the caller.
            QMessageBox.critical(self, "Import error", "Error importing data!\nError: " + str(sys.exc_info()[1]))
            raise

    def _on_redraw(self):
        if not self.filename :
            QMessageBox.warning(self, "Import file", "Please import file first")
            return
        self.import_observation()
        Global.event.interface_redraw_clicked.emit()
        pass

    def _on_hjd_state_changed(self, state):
        # T0 / P spin boxes are editable only while HJD conversion is checked.
        if state == Qt.Checked:
            self.time_zero.setDisabled(False)
            self.period.setDisabled(False)
        else:
            self.time_zero.setDisabled(True)
            self.period.setDisabled(True)
        pass

    def _on_magnitude_state_changed(self, state):
        # Reference-magnitude box follows the magnitude-to-flux checkbox.
        if state == Qt.Checked:
            self.magnitude_max.setDisabled(False)
        else:
            self.magnitude_max.setDisabled(True)
        pass
class ResultView(QTabWidget):
    """Tab container holding the results table, the plot and the raw data."""

    def __init__(self):
        super(ResultView, self).__init__()
        self.results = ResultsTab()
        self.plot = ResultPlot()
        self.data = ResultTab()
        for widget, title in ((self.results, 'Results'),
                              (self.plot, 'Plot'),
                              (self.data, 'Data')):
            self.addTab(widget, title)
        # Open on the plot tab, and jump back to it whenever a task is picked.
        self.setCurrentIndex(1)
        Global.event.task_selected.connect(self._on_task_selected)

    def _on_task_selected(self, task):
        self.setCurrentIndex(1)
class ResultPlot(QWidget):
    """Composite tab: light-curve plot, residual plot and a chi^2 read-out."""

    def __init__(self):
        super(ResultPlot, self).__init__()
        vl = QVBoxLayout()
        self.plot = Plot()
        vl.setAlignment(Qt.AlignTop)
        vl.addWidget(self.plot)
        self.residual_plot = ResidualPlot()
        self.residual_plot.setFixedHeight(150)
        vl.addWidget(self.residual_plot)
        hl = QHBoxLayout()
        #hl.setAlignment(Qt.AlignHCenter)
        # chi^2 label/value start hidden; ResidualPlot shows them when a
        # chi^2 is available (it accesses them through parent()).
        self.chi2_label = QLabel('chi^2')
        self.chi2_label.setFixedWidth(30)
        self.chi2_label.hide();
        self.chi2_value = QLineEdit()
        self.chi2_value.setAlignment(Qt.AlignRight)
        self.chi2_value.setFixedWidth(120)
        self.chi2_value.hide()
        auto_plot = QCheckBox('Auto plot finished result')
        auto_plot.stateChanged.connect(self._on_auto_plot_state_changed)
        hl.addWidget(auto_plot, Qt.AlignLeft)
        hl.addWidget(self.chi2_label)
        hl.addWidget(self.chi2_value)
        hl.setStretch(1, 0)
        vl.addLayout(hl)
        self.setLayout(vl)

    def _on_auto_plot_state_changed(self, checked_state):
        # Qt delivers an int check state; collapse it to a plain bool.
        checked_state = True if checked_state else False
        Global.event.interface_auto_plot_state_changed.emit(checked_state)
class Plot(FigureCanvas):
    """Matplotlib canvas drawing the synthetic curve and imported observations."""

    # NOTE(review): __instance is never assigned anywhere in this class, so
    # Plot.instance() always returns None -- confirm whether callers rely on it.
    __instance = None

    def __init__(self):
        Global.event.task_selected.connect(self._on_task_selected)
        Global.event.task_deleted.connect(self._on_task_deleted)
        Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)
        self.task = None          # currently displayed task
        self.last_x_limit = []    # [xmin, xmax] last applied to the axes
        self.axes = None
        # Blend the figure into the native widget background colour.
        bg_color = str(QPalette().color(QPalette.Active, QPalette.Window).name())
        rcParams.update({'font.size': 10})
        self.figure = Figure(facecolor=bg_color, edgecolor=bg_color)
        self.figure.hold(False)
        super(Plot, self).__init__(self.figure)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.updateGeometry()

    def _on_task_selected(self, task):
        self.set_task(task)
        self.redraw()

    def _on_task_deleted(self, task):
        # Only react if the deleted task is the one on screen.
        if self.task == task:
            self.set_task(None)
            self.clear()
            ResultTab.instance().set_data([], [], [], [])

    def _on_tasks_list_updated(self):
        # With no tasks left there is nothing to show.
        if not len(Global.tasks()):
            self.set_task(None)
            self.clear()
            ResultTab.instance().set_data([], [], [], [])

    @staticmethod
    def instance():
        return Plot.__instance

    def set_task(self, task):
        self.task = task

    def clear(self):
        self.figure.clf()
        self.figure.clear()
        gc.collect()

    def redraw(self):
        """Redraw both series for the current task and refresh the data tab."""
        self.clear()
        self.axes = self.figure.add_subplot(1, 1, 1)
        self.axes.grid(True)
        self.axes.set_xlabel('Phase')
        self.axes.set_ylabel('Flux')
        result_phases = []
        result_values = []
        import_phases = []
        import_values = []
        # Split the per-phase result dict into synthetic and imported series.
        keys = sorted(self.task.result.data().keys())
        for key in keys:
            if self.task.result.data()[key]['result_value'] is not None:
                result_phases.append(key)
                result_values.append(self.task.result.data()[key]['result_value'])
            if self.task.result.data()[key]['import_value'] is not None:
                import_phases.append(key)
                import_values.append(self.task.result.data()[key]['import_value'])
        ResultTab.instance().set_data(result_phases, result_values, import_phases, import_values)
        if not result_phases and not import_phases :
            return
        y_r_min = 1
        y_r_max = 0
        x_r_max = 0
        y_i_min = 1
        y_i_max = 0
        x_i_max = 0
        if result_values :
            y_r_min = min(result_values)
            y_r_max = max(result_values)
            x_r_max = max(abs(min(result_phases)), abs(max(result_phases)))
        if import_values :
            y_i_min = min(import_values)
            y_i_max = max(import_values)
            x_i_max = max(abs(min(import_phases)), abs(max(import_phases)))
        y_max = max(y_r_max, y_i_max)
        y_min = min(y_r_min, y_i_min)
        x_max = max(x_r_max, x_i_max)
        # Pad the limits by 10% of the data span on each side.
        y_pad = ((y_max - y_min) / 100) * 10
        x_pad = (x_max / 100) * 10
        if y_min == y_max:
            # NOTE(review): for a flat series this produces y_min > y_max
            # (inverted y-limits) -- confirm this is the intended effect.
            y_min += 1
            y_max -= 1
        self.axes.set_autoscaley_on(False)
        self.axes.set_ylim([y_min - y_pad, y_max + y_pad])
        # X-limits are symmetric around 0; publish them for the residual plot.
        self.last_x_limit = [-(x_max + x_pad), x_max + x_pad]
        Global.event.plot_x_limit_changed.emit(self.last_x_limit)
        self.axes.set_autoscalex_on(False)
        self.axes.set_xlim(self.last_x_limit)
        if len(result_phases):
            self.axes.plot(result_phases, result_values, color='b', label="Prediction")
        if len(import_phases):
            self.axes.scatter(import_phases, import_values, s=1, color='r', label='Observation')
        self.draw()
class ResidualPlot(FigureCanvas):
    """Canvas plotting observation-minus-model residuals with a chi^2 read-out."""

    # NOTE(review): never assigned, mirroring the sibling Plot class -- confirm.
    __instance = None

    def __init__(self):
        Global.event.task_selected.connect(self._on_task_selected)
        Global.event.plot_x_limit_changed.connect(self._on_x_limit_changed)
        Global.event.task_deleted.connect(self._on_task_deleted)
        Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)
        self.task = None
        self.axes = None
        self.last_x_limit = []   # follows the main plot via plot_x_limit_changed
        self.chi2s = []          # every chi^2 plotted so far, for colour-coding
        bg_color = str(QPalette().color(QPalette.Active, QPalette.Window).name())
        rcParams.update({'font.size': 10})
        self.figure = Figure(facecolor=bg_color, edgecolor=bg_color)
        self.figure.hold(False)
        super(ResidualPlot, self).__init__(self.figure)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.updateGeometry()
        self.hide()

    def _on_task_selected(self, task):
        self.set_task(task)
        self.redraw()

    def _on_task_deleted(self, task):
        if self.task == task:
            self.set_task(None)
            self.clear()

    def _on_tasks_list_updated(self):
        if not len(Global.tasks()):
            self.set_task(None)
            self.clear()

    def set_task(self, task):
        self.task = task

    def clear(self):
        self.figure.clf()
        self.figure.clear()
        self.draw()
        # The chi^2 widgets live on the parent ResultPlot widget.
        self.parent().chi2_label.hide()
        self.parent().chi2_value.hide()
        self.hide()
        gc.collect()

    def redraw(self):
        """Plot residuals for the current task; colour the chi^2 text green
        when it ties or beats the best seen so far, red otherwise (black for
        the very first value)."""
        self.clear()
        if self.task.result.chi2 is None:
            self.parent().chi2_label.hide()
            self.parent().chi2_value.hide()
            self.hide()
            return
        self.chi2s.append(self.task.result.chi2)
        self.show()
        self.parent().chi2_label.show()
        self.parent().chi2_value.show()
        self.axes = self.figure.add_subplot(1, 1, 1)
        self.axes.grid(False)
        self.figure.set_alpha(0)
        self.axes.set_xlabel('Phase')
        self.axes.set_ylabel('Residual')
        phases = []
        delta_values = []
        keys = sorted(self.task.result.data().keys())
        for key in keys:
            if self.task.result.data()[key]['delta_value'] is not None:
                phases.append(key)
                delta_values.append(self.task.result.data()[key]['delta_value'])
        # Symmetric y-limits padded by 10% of the largest residual magnitude.
        y_max = max(abs(min(delta_values)), abs(max(delta_values)))
        y_pad = (y_max / 100) * 10
        self.axes.set_autoscaley_on(False)
        self.axes.set_ylim([-(y_max + y_pad), y_max + y_pad])
        self.axes.set_autoscalex_on(False)
        self.axes.set_xlim(self.last_x_limit)
        color = QColor(0,0,0)
        # The current chi^2 was appended above, so min() already includes it:
        # 'chi2 <= min_chi2' holds exactly when it is the best so far.
        min_chi2 = min(self.chi2s)
        if len(self.chi2s) == 1 :
            color = QColor(0,0,0)
        elif self.task.result.chi2 <= min_chi2 :
            color = QColor(0,139,0)
        else:
            color = QColor(255,0,0)
        self.axes.axhline(y=0, ls='--', linewidth=0.5, color='black')
        self.axes.scatter(phases, delta_values, s=0.5, color='r')
        palette = self.parent().chi2_value.palette()
        palette.setColor(QPalette.Active, QPalette.Text, color)
        self.parent().chi2_value.setPalette(palette)
        self.parent().chi2_value.setText(str(self.task.result.chi2))
        self.draw()

    def _on_x_limit_changed(self, limit):
        self.last_x_limit = limit
class ResultTab(QWidget):
    """Data tab: phase / synthetic / observation / delta table with export."""

    __instance = None

    def __init__(self):
        super(QWidget, self).__init__()
        # First instance constructed becomes the singleton for instance().
        if ResultTab.__instance is None :
            ResultTab.__instance = self
        self.phases = []
        self.values = []
        self.import_phases = []
        self.import_values = []
        self.export = None   # keeps the export dialog alive (see _on_export)
        self.vl = QVBoxLayout()
        header = ['Phase', 'Synthetic', 'Observation', 'Delta']
        self.table = QTableWidget()
        self.table.verticalHeader().setVisible(False)
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.table.setColumnCount(len(header))
        self.table.setHorizontalHeaderLabels(header)
        self.vl.addWidget(self.table)
        hl = QHBoxLayout()
        hl.setAlignment(Qt.AlignRight)
        export_button = QPushButton('Export...')
        export_menu = QMenu()
        export_menu.addAction('\\t separated').triggered.connect(lambda : self._on_export('\t'))
        export_menu.addAction(', separated').triggered.connect(lambda : self._on_export(','))
        export_menu.addAction('; separated').triggered.connect(lambda : self._on_export(';'))
        export_button.setMenu(export_menu)
        hl.addWidget(export_button)
        self.vl.addLayout(hl)
        self.setLayout(self.vl)

    def set_data(self, phases, values, import_phases, import_values):
        """Populate the table; observation/delta cells are filled only where an
        imported phase exactly matches a synthetic phase."""
        self.phases = phases
        self.values = values
        self.import_phases = import_phases
        self.import_values = import_values
        self.table.setRowCount(len(self.phases))
        for (index, phase) in enumerate(self.phases):
            phase_item = QTableWidgetItem('%.12f' % phase)
            phase_item.setTextAlignment(Qt.AlignRight)
            value_item = QTableWidgetItem('%.12f' % self.values[index])
            value_item.setTextAlignment(Qt.AlignRight)
            self.table.setItem(index, 0, phase_item)
            self.table.setItem(index, 1, value_item)
            if phase in import_phases :
                import_index = import_phases.index(phase)
                value_item = QTableWidgetItem('%.12f' % import_values[import_index])
                value_item.setTextAlignment(Qt.AlignRight)
                delta_flux_item = QTableWidgetItem('%.12f' % (import_values[import_index] - self.values[index]))
                delta_flux_item.setTextAlignment(Qt.AlignRight)
                self.table.setItem(index, 2, value_item)
                self.table.setItem(index, 3, delta_flux_item)

    def _on_export(self, separator):
        # Keep a reference so the dialog is not garbage-collected immediately.
        self.export = ExportDatDialog(self.phases, self.values, self.import_phases, self.import_values, separator)
        pass

    @staticmethod
    def instance():
        return ResultTab.__instance
class ExportDatDialog(QFileDialog):
    """Save dialog that writes phase/value rows as delimiter-separated text.

    Rows carry phase and synthetic value; observation and delta columns are
    appended only where an imported phase matches exactly.
    """

    def __init__(self, phases, values, import_phases, import_values, separator):
        super(ExportDatDialog, self).__init__()
        self.setWindowTitle('Export DAT')
        #self.setWindowIcon(QIcon('assets/export.png'))
        self.resize(500, 400)
        self.setFileMode(QFileDialog.AnyFile)
        filename = self.getSaveFileName(directory='result.dat', filter='DAT (*.dat);;')
        try:
            # 'wb' with csv.writer is the Python 2 csv convention.
            with open(filename, 'wb') as csv_file:
                csv_writer = csv.writer(csv_file, delimiter=separator)
                for index, phase in enumerate(phases):
                    row = []
                    row.append('%.12f' % phase)
                    row.append('%.12f' % values[index])
                    if phase in import_phases :
                        import_index = import_phases.index(phase)
                        row.append('%.12f' % import_values[import_index])
                        row.append('%.12f' % (import_values[import_index] - values[index]))
                    csv_writer.writerow(row)
        except:
            # Inform the user, then re-raise so the failure is not hidden.
            QMessageBox.warning(self, "Error", "Error exporting!\nError: " + str(sys.exc_info()[1]))
            raise
class ResultsTab(QWidget):
    """Tab listing all task results with a delete-all control underneath."""

    def __init__(self):
        QWidget.__init__(self)
        layout = QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(ResultsTable())
        buttons = QHBoxLayout()
        buttons.setAlignment(Qt.AlignRight)
        layout.addLayout(buttons)
        delete_all = QPushButton('Delete all')
        delete_all.clicked.connect(self._on_delete_all_clicked)
        buttons.addWidget(delete_all)

    def _on_delete_all_clicked(self):
        # Deletion itself is handled by whoever listens to this event.
        Global.event.interface_delete_all_results_clicked.emit()
class ResultsTable(QTableView):
    """Sortable task-results table with context menus and keyboard shortcuts."""

    def __init__(self):
        QTableView.__init__(self)
        # Remember sort, selection and scroll so they survive model rebuilds.
        self.last_sort_column = 0
        self.last_sort_order = Qt.AscendingOrder
        self.last_selected_row = 0
        self.last_scroll_position = 0
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setSortingEnabled(True)
        self.verticalHeader().setVisible(False)
        self.horizontalHeader().setHighlightSections(False)
        self.horizontalHeader().setMovable(True)
        self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)
        self.horizontalHeader().customContextMenuRequested.connect(self._on_header_menu)
        self.doubleClicked.connect(self._on_row_double_clicked)
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self._on_row_menu)
        Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)

    def keyPressEvent(self, event):
        # Delete removes the current task; Enter/Return selects it for display.
        if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Delete:
            row = self.currentIndex().row()
            task = self.get_task_by_row(row)
            if task:
                self.delete_task_by_id(task.id)
        elif event.type() == QEvent.KeyPress and (event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return):
            Global.event.task_selected.emit(self.get_task_by_row(self.currentIndex().row()))
        else:
            return QTableView.keyPressEvent(self, event)

    def _on_header_menu(self, point):
        # One checkable action per column to toggle its visibility.
        menu = QMenu()
        for index, title in enumerate(self.model().header):
            action = QAction(self)
            action.setData(index)
            action.setText(title)
            action.setCheckable(True)
            action.setChecked(False if self.isColumnHidden(index) else True)
            action.triggered.connect(self._on_header_menu_action)
            menu.addAction(action)
        menu.popup(self.mapToGlobal(point))
        menu.exec_()

    def _on_header_menu_action(self, checked):
        # .toInt()[0] unpacks the (value, ok) pair of a PyQt4 QVariant.
        index = self.sender().data().toInt()[0]
        if checked:
            self.showColumn(index)
        else:
            self.hideColumn(index)

    def _on_row_menu(self, point):
        row = self.rowAt(point.y())
        task = self.get_task_by_row(row)
        if row < 0 or task is None:
            return
        menu = QMenu()
        load_action = QAction(self)
        load_action.setData(task.id)
        load_action.setText("Load parameters")
        load_action.triggered.connect(self._on_load_params_action)
        menu.addAction(load_action)
        delete_action = QAction(self)
        delete_action.setData(task.id)
        delete_action.setText('Delete')
        delete_action.triggered.connect(self._on_row_delete_action)
        menu.addAction(delete_action)
        menu.popup(self.mapToGlobal(point))
        menu.exec_()

    def _on_load_params_action(self):
        # The task id travels in the triggering action's data (PyQt4 QVariant).
        id = self.sender().data().toInt()[0]
        Global.event.interface_load_task_params.emit(self.get_task_by_id(id))

    def _on_row_delete_action(self):
        id = self.sender().data().toInt()[0]
        self.delete_task_by_id(id)

    def delete_task_by_id(self, id):
        task = self.get_task_by_id(id)
        if task:
            Global.delete_task(task)

    def get_task_by_id(self, id):
        for task in self.model().tasks:
            if task.id == id:
                return task
        return None

    def get_task_by_row(self, row):
        # tasks_data rows store the task id in column 0.
        if self.model() and -1 < row < len(self.model().tasks_data):
            return self.get_task_by_id(self.model().tasks_data[row][0])
        return None

    def _on_tasks_list_updated(self):
        # Rebuild the model, then restore sort order, selection and scroll.
        if self.model():
            self.last_sort_column = self.model().last_sort_column
            self.last_sort_order = self.model().last_sort_order
            self.last_selected_row = self.currentIndex().row()
            self.last_scroll_position = self.verticalScrollBar().sliderPosition()
        self.setModel(ResultsTableModel(Global.tasks()))
        self.sortByColumn(self.last_sort_column, self.last_sort_order)
        self.resizeColumnsToContents()
        self.horizontalHeader().setStretchLastSection(True)
        self.selectRow(self.last_selected_row)
        self.verticalScrollBar().setSliderPosition(self.last_scroll_position)

    def _on_row_double_clicked(self, index):
        target_id = self.model().tasks_data[index.row()][0]
        for task in self.model().tasks:
            if task.id == target_id:
                Global.event.task_selected.emit(task)
                break
class ResultsTableModel(QAbstractTableModel):
    """Read-only table model exposing one summary row per task.

    Column 0 holds the task id so views can map rows back to tasks after
    sorting.
    """

    def __init__(self, tasks):
        QAbstractTableModel.__init__(self)
        self.tasks = tasks
        self.tasks_data = []
        self.last_sort_column = 0
        self.last_sort_order = Qt.AscendingOrder
        self.header = ['#',
                       'Sma',
                       'Rs',
                       'Rp',
                       'Ts',
                       'Tp',
                       'Inc.',
                       'Darkening law',
                       'chi^2']
        for task in tasks:
            self.tasks_data.append([task.id,
                                    task.input.semi_major_axis,
                                    task.input.star_radius,
                                    task.input.planet_radius,
                                    task.input.star_temperature,
                                    task.input.planet_temperature,
                                    task.input.inclination,
                                    task.input.darkening_law + '(' + str(task.input.darkening_coefficient_1) + ', ' + str(task.input.darkening_coefficient_2) + ')',
                                    task.result.chi2])

    def rowCount(self, parent):
        return len(self.tasks_data)

    def columnCount(self, parent):
        return len(self.tasks_data[0]) if len(self.tasks_data) else 0

    def data(self, index, role):
        if not index.isValid():
            return QVariant()
        elif role == Qt.TextAlignmentRole:
            return QVariant(Qt.AlignRight | Qt.AlignVCenter)
        elif role != Qt.DisplayRole:
            return QVariant()
        return QVariant(self.tasks_data[index.row()][index.column()])

    def headerData(self, col, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return QVariant(self.header[col])
        return QVariant()

    def sort(self, col, order):
        # Remember the sort so the view can restore it after a model rebuild.
        self.last_sort_column = col
        self.last_sort_order = order
        self.layoutAboutToBeChanged.emit()
        self.tasks_data = sorted(self.tasks_data, key=operator.itemgetter(col))
        if order == Qt.DescendingOrder:
            self.tasks_data.reverse()
        # Fixed: stray "| mit |" text-extraction residue removed from this
        # line; it would have raised a NameError at runtime.
        self.layoutChanged.emit()
import sys
sys.dont_write_bytecode=True
def demo(f=None,demos=[]):
    """Register *f* as a demo when given; with no argument, run all demos.

    The mutable default ``demos`` is deliberate: it is the module-level
    registry shared across calls (used decorator-style via ``@demo``).
    """
    if f: demos.append(f); return f
    for d in demos:
        # Python 2 print statement; d.func_name is the py2 spelling.
        print '\n--|',d.func_name,'|','-'*40,'\n',d.__doc__,'\n'
        d()
def test(f=None,tests=[]):
    """Register *f* as a test when given; with no argument, run all tests.

    Each registered test returns a list of (want, got) pairs; every pair is
    compared and one PASSED/FAILED line printed.  ``tests`` is a deliberate
    mutable-default registry (used via the ``@test`` decorator).
    """
    if f: tests.append(f); return f
    ok=no=0
    for t in tests:
        print "# ",t.func_name + ': ',t.__doc__
        for n,(want,got) in enumerate(t()):
            if want == got:
                ok += 1; print "PASSED:",t.func_name,n+1
            else:
                no += 1; print "FAILED:",t.func_name,n+1
    if tests:
        # NOTE: Python 2 integer division truncates the percentage before
        # round() ever sees it.
        print '\n# totals: %s%% PASSED' % round(100*ok/(ok+no))
@test
def tested1():
    "Test functions return lists of (want,got) pairs"
    # Two pairs mismatch on purpose, to demonstrate FAILED output.
    return [(1,1),(0,1),(2,2),(1,0)]
@test
def tested2():
    "Test function can return one pair"
    # Minimal registry example: a single passing pair.
    return [(1,1)]
import random
class Deep(dict):
    """A dict that autovivifies: missing keys grow nested Deep instances."""

    def __getitem__(self, key):
        # Create-and-store a fresh Deep on first access to a missing key.
        if key not in self:
            self[key] = Deep()
        return self.get(key)

    def push(self, key, value):
        """Append *value* to the list stored at *key*, creating it if needed."""
        # setdefault bypasses the autovivifying __getitem__ above.
        bucket = self.setdefault(key, [])
        bucket.append(value)
        return bucket

    def at(self, path, default=None):
        """Walk *path* (a key sequence) without autovivifying; return
        *default* as soon as any key is absent."""
        node = self
        for key in path:
            if key not in node:
                return default
            node = node[key]
        return node

    def inc(self, key, n=1):
        """Add *n* to the counter at *key* (a missing key counts as 0)."""
        total = self.get(key, 0) + n
        self[key] = total
        return total
@demo
def _deeped() :
    "Auto-generation of nested dictionaries"
    # Assigning through missing keys silently creates the intermediate
    # Deep levels.
    d=Deep()
    d[1][2][3] = 22
    d[1][2][4] = 44
    print d
class Sample(object):
    """Base class for random samplers; subclasses override one()/ready()."""

    def one(o):
        # Draw a single sample; the base class has nothing to draw.
        pass

    def ready(o):
        # Hook for lazy preparation, run once before sampling starts.
        pass

    def some(o, max=100):
        """Generate up to *max* samples, calling ready() once up front."""
        o.ready()
        while max >= 1:
            max -= 1
            yield o.one()
class Some(Sample):
    """Sampler over a list with polynomially increasing selection weights."""

    def __init__(o, txt):
        o.txt = txt

    def bias(o, lst, scale=2):
        """Attach weight idx**scale to each element; returns self for chaining."""
        o.all = [(idx ** scale, item) for idx, item in enumerate(lst)]
        return o

    def ready(o):
        # Order by weight and cache the weight total for roulette selection.
        o.all.sort(key=lambda pair: pair[0])
        o.total = sum(weight for weight, _ in o.all)

    def one(o):
        """Roulette-wheel pick proportional to the attached weights."""
        spin = random.uniform(0, o.total)
        running = 0
        for weight, item in o.all:
            running += weight
            if running > spin:
                return item
@demo
def somed1():
    "Biased list"
    # NOTE(review): a second somed1 below rebinds this module attribute,
    # but both functions stay registered (and run) via the @demo registry.
    somed0(Some("xx").bias([x for x in xrange(0,25)]),0.2)
def somed0(r,shrink=2,n=1000):
    """Draw *n* samples from sampler *r* and print an ASCII histogram.

    Samples are bucketed by round(x, 1); bar lengths are divided by
    *shrink*.  Seeds the RNG so demo output is reproducible.
    """
    def show(k,v) :
        # One histogram row: right-justified bucket, stars, percentage.
        return str(k).rjust(10)+ ' : '+ \
               '*'*int(v)+ ' [%3.2f]%%'%int(shrink*v)
    all = Deep()
    random.seed(1)
    for x in r.some(max=n): all.inc(round(x,1))
    print ""
    #order = sorted([int(x) for x in all.keys()])
    order = sorted(all.keys())
    for k in order:
        v = all[k]
        print "DEBUG: " + str(v) + " " + str(k)
        # for k,v in sorted(all.items(),key=all.get):
        print show(k,100.00/shrink*v/n)
class Runiform(Sample):
    """Uniform random sampler over [lo, hi], cast through *final*."""

    def __init__(o, txt, lo, hi, final=float):
        o.txt = txt
        o.lo = lo
        o.hi = hi
        o.final = final

    def one(o):
        # One uniform draw, converted (e.g. to int) by the final() hook.
        return o.final(random.uniform(o.lo, o.hi))
@demo
def somed1():
    "Uniform 1 to 5"
    # NOTE(review): redefines the somed1 above; both demos were still
    # registered by @demo, so both run.
    somed0(Runiform("xx",1,5,int),0.5)
class Rtriangle(Sample):
    """Triangular-distribution sampler (lo, hi, mode), cast via *final*."""

    def __init__(o, txt, lo, hi, mode, final=float):
        o.txt = txt
        o.lo = lo
        o.hi = hi
        o.mode = mode
        o.final = final

    def one(o):
        return o.final(random.triangular(o.lo, o.hi, o.mode))
@demo
def somed2():
    "Triangle min,max,mode = 0,20,4"
    # Histogram of a triangular distribution, bars scaled by 0.25.
    somed0(Rtriangle("xx",0,20,4,int),0.25)
class Rgamma(Sample):
    """Gamma-distribution sampler with shape *a* and scale *b*."""

    def __init__(o, txt, a, b, final=float):
        o.txt = txt
        o.a = a
        o.b = b
        o.final = final

    def one(o):
        return o.final(random.gammavariate(o.a, o.b))
@demo
def somed3():
    "Gamma a,b = 5,1"
    # NOTE(review): the docstring says a=5 but the call uses a=6 -- confirm
    # which was intended.
    somed0(Rgamma("xx",6,1,int),0.33)
class Rexpo(Sample):
    """Exponential-distribution sampler with decay constant *lambd*."""

    def __init__(o, txt, lambd, final=float):
        o.txt = txt
        o.lambd = lambd
        o.final = final

    def one(o):
        return o.final(random.expovariate(o.lambd))
@demo
def somed4():
    "Lambda, decay constant=0.7"
    # Histogram of an exponential distribution with lambda = 0.7.
    somed0(Rexpo("xx",0.7,int),1)
class Rgauss(Sample):
    """Gaussian sampler with mean *mu* and standard deviation *sigma*."""

    def __init__(o, txt, mu, sigma, final=float):
        o.txt = txt
        o.mu = mu
        o.sigma = sigma
        o.final = final

    def one(o):
        return o.final(random.gauss(o.mu, o.sigma))
@demo
def somed5():
    "Gaussian, mean=20, sigma=2"
    # Histogram of a normal distribution, bars scaled by 0.5.
    somed0(Rgauss("xx",20,2,int),0.5)
class Rsteps(Sample):
def __init__(o,txt,bins=7,final=float):
o.txt= txt; o.bins= bins; o.final= final
o.all=[]; o.stale=False
def bias(o,lst):
o.stale=True
o.all=lst
def put(o,x):
o.stale=True
o.all.append(x)
def ready(o):
if o.stale:
o.all = sorted(o.all)
split = max(1, int(len(o.all)/o.bins))
o.all = [o.all[int(bin*split)] for bin in range(o.bins)]
o.stale=False
def __sub__(o1,o2):
o1.ready(); o2.ready()
diff = sum1 = sum2 = 0.0
for n1 in o1.all: sum1 += n1
for n2 in o2.all: sum2 += n2
for n1,n2 in zip(o1.all,o2.all) :
print n1,n2,sum1,sum2
diff += (n1/sum1 - n2/sum2)
return 100*diff
def one(o):
o.ready()
n = random.randint(1,o.bins-1)
return o.final(random.uniform( o.all[n-1], o.all[n]))
@demo
def somed6():
    "Divide Data into 5 steps"
    # NOTE(review): this chaining requires Rsteps.bias to return the
    # sampler object (as Some.bias does) for somed0 to receive it.
    lst = [x for x in xrange(0,33)]
    somed0(Rsteps("xx",7,int).bias(lst))
import random
def any(l):
    """Return a uniformly random element of sequence *l*.

    NOTE: shadows the builtin any() for the rest of this module.  The
    randint-based indexing is kept (rather than random.choice) so the
    seeded RNG stream stays identical.
    """
    last = len(l) - 1
    return l[random.randint(0, last)]
def chunks(l, n):
    "Divide 'l' into sub-lists of length 'n'."
    starts = range(0, len(l), n)
    return [l[start:start + n] for start in starts]
def often(seq,max=100,
          item = lambda x: x,
          weight = lambda x: x.priority) :
    """Yield up to *max* members of *seq*, each chosen with probability
    proportional to weight(x); item(x) maps every pick before yielding."""
    total = 0
    for x in seq: total += weight(x)
    while max >= 1:
        max -= 1
        spin = random.uniform(0, total)
        running = 0
        for x in seq:
            running += weight(x)
            if running > spin:
                yield item(x)
                break
@test
def oftened():
    "select, with bias, from a space"
    def left(x) : return x[0]
    def right(x): return x[1]
    counts = Deep()
    # Fixed seed makes the expected histogram below reproducible.
    random.seed(1)
    for x in often([("a",10),("b",20),("c",40),("d",80)],
                   max=1000,item=left,weight=right):
        counts.inc(x)
    # Expected counts are tied to the Mersenne Twister stream for seed 1.
    return [(counts,{'a': 67, 'c': 265, 'b': 113, 'd': 555})]
import random
class X2Y(object):
    """Tiny interface: draw an input x, map it to y, or do both via xy()."""

    def x(o):
        pass

    def y(o, x):
        pass

    def xy(o):
        """Return one (x, y) pair from a single draw."""
        drawn = o.x()
        return drawn, o.y(drawn)
class Range(X2Y):
    """A named numeric attribute sampled uniformly from [min, max]."""

    def __init__(o, name, min, max, final=float, wild=False):
        o.txt, o.wild = name, wild
        o.update(min, max, final)

    def x(o):
        # One draw from the current uniform sampler.
        return o.sample.one()

    def update(o, min, max, final=float, m=None):
        """(Re)set the bounds, rebuild the sampler and, when model *m* is
        given, register this range under its name in m.all."""
        o.min, o.max = min, max
        o.sample = Runiform(o.txt, min, max, final)
        if m:
            m.all[o.txt] = o
class Model(X2Y):
    """A bag of named Range-like attributes; x() samples them all."""

    def __init__(o):
        # Index every attribute returned by about() under its name.
        o.all = {i.txt: i for i in o.about()}

    def x(o):
        """Return {name: sampled value} for every attribute."""
        return {name: thing.x() for name, thing in o.all.items()}

    def about(o):
        # Subclasses return the list of attribute objects.
        pass
#################
##--SCED-RISK--##
#################
def totalRisk(project, risktable):
    """Average the six COCOMO risk components for *project*."""
    _d = 3.73   # normalizing constant
    parts = (sced_risk(project, risktable) +
             prod_risk(project, risktable) +
             pers_risk(project, risktable) +
             proc_risk(project, risktable) +
             plat_risk(project, risktable) +
             reus_risk(project, risktable))
    return parts / _d
def getRisk(a, b, project, risks):
    """Look up the tabled risk value for the attribute pair (a, b).

    *project* maps attribute names to 1-based ratings; *risks* maps
    (a, b) to a comma-separated table whose rows are space-separated,
    indexed by project[b] (row) and project[a] (column).  Returns 0 when
    either attribute is missing or no table exists for the pair.
    """
    if a not in project or b not in project:
        return 0
    _aval = int(project[a])
    _bval = int(project[b])
    try:
        rows = risks[a, b].split(",")     # split table into rows
        cols = rows[_bval - 1].split()    # grab and split one row
        return float(cols[_aval - 1])     # pick the column
    except KeyError:
        return 0
def sced_risk(proj, risks):
    """Schedule risk: 'sced' paired against ten other attributes."""
    others = str.split("rely time pvol tool acap "
                       "aexp pcap plex ltex pmat")
    return sum(getRisk('sced', other, proj, risks) for other in others)
def prod_risk(proj, risks):
    """Product-attribute risk: sum of tabled pairwise interactions.

    Rewritten without the Python-2-only tuple-unpacking lambda
    (``lambda (x,y): ...``) so the module stays importable on Python 3;
    behavior is unchanged.
    """
    pairs = [("rely","acap"), ("rely","pcap"),
             ("cplx","acap"), ("cplx","pcap"),
             ("cplx","tool"), ("rely","pmat"),
             ("sced","cplx"), ("sced","rely"),
             ("sced","time"), ("ruse","aexp"),
             ("ruse","ltex")]
    return sum(getRisk(x, y, proj, risks) for x, y in pairs)
def pers_risk(proj, risks):
    """Personnel-attribute risk: sum of tabled pairwise interactions.

    Rewritten without the Python-2-only tuple-unpacking lambda so the
    module stays importable on Python 3; behavior is unchanged.
    """
    pairs = [("pmat","acap"), ("stor","acap"),
             ("time","acap"), ("tool","acap"),
             ("tool","pcap"), ("ruse","aexp"),
             ("ruse","ltex"), ("pmat","pcap"),
             ("stor","pcap"), ("time","pcap"),
             ("ltex","pcap"), ("pvol","plex"),
             ("sced","acap"), ("sced","aexp"),
             ("sced","pcap"), ("sced","plex"),
             ("sced","ltex"), ("rely","acap"),
             ("rely","pcap"), ("cplx","acap"),
             ("cplx","pcap"), ("team","aexp")]
    return sum(getRisk(x, y, proj, risks) for x, y in pairs)
def proc_risk(proj, risks):
    """Process-attribute risk: sum of tabled pairwise interactions.

    Rewritten without the Python-2-only tuple-unpacking lambda so the
    module stays importable on Python 3; behavior is unchanged.
    """
    pairs = [("tool","pmat"), ("time","tool"),
             ("team","aexp"), ("team","sced"),
             ("team","site"), ("sced","tool"),
             ("sced","pmat"), ("cplx","tool"),
             ("pmat","acap"), ("tool","acap"),
             ("tool","pcap"), ("pmat","pcap")]
    return sum(getRisk(x, y, proj, risks) for x, y in pairs)
def plat_risk(proj, risks):
    """Platform-attribute risk: sum of tabled pairwise interactions.

    Rewritten without the Python-2-only tuple-unpacking lambda so the
    module stays importable on Python 3; behavior is unchanged.
    """
    pairs = [("sced","time"), ("sced","pvol"),
             ("stor","acap"), ("time","acap"),
             ("stor","pcap"), ("pvol","plex"),
             ("time","tool")]
    return sum(getRisk(x, y, proj, risks) for x, y in pairs)
def reus_risk(project, risktable):
    """Reuse risk: 'ruse' paired with 'aexp' and 'ltex'."""
    total = getRisk('ruse', 'aexp', project, risktable)
    total += getRisk('ruse', 'ltex', project, risktable)
    return total
#############
def readRisks(risktable):
    """Populate *risktable* with the COCOMO-II schedule-risk tables.

    Each entry maps an attribute pair to 6 comma-separated rows of 6
    space-separated risk scores, indexed 1-based by the two ratings.

    NOTE(review): two tables are keyed with 'pexp' (('sced','pexp') and
    ('pvol','pexp')) while the risk functions above query 'plex'; those
    lookups fall through getRisk's KeyError handler and contribute 0.
    Confirm whether 'pexp' should read 'plex'.
    """
    risktable['sced','rely'] = ("0 0 0 1 2 0,"
                                "0 0 0 0 1 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','cplx'] = ("0 0 0 1 2 4,"
                                "0 0 0 0 1 2,"
                                "0 0 0 0 0 1,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','time'] = ("0 0 0 1 2 4,"
                                "0 0 0 0 1 2,"
                                "0 0 0 0 0 1,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','pvol'] = ("0 0 0 1 2 0,"
                                "0 0 0 0 1 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','tool'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','pexp'] = ("4 2 1 0 0 0,"
                                "2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','pcap'] = ("4 2 1 0 0 0,"
                                "2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','aexp'] = ("4 2 1 0 0 0,"
                                "2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','acap'] = ("4 2 1 0 0 0,"
                                "2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','ltex'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['sced','pmat'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['rely','acap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['rely','pcap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['cplx','acap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['cplx','pcap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['cplx','tool'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['rely','pmat'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['pmat','acap'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['stor','acap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['time','acap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['tool','acap'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['tool','pcap'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['ruse','aexp'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['ruse','ltex'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['pmat','pcap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['stor','pcap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['time','pcap'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "4 2 1 0 0 0")
    risktable['ltex','pcap'] = ("4 2 1 0 0 0,"
                                "2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['pvol','pexp'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['tool','pmat'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['time','tool'] = ("0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "2 1 0 0 0 0")
    risktable['team','aexp'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['team','sced'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
    risktable['team','site'] = ("2 1 0 0 0 0,"
                                "1 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0,"
                                "0 0 0 0 0 0")
#############
import math
# CoQualMo Calibration Mods:
# Per-phase lists of attributes with negative defect-introduction slope
# (higher rating -> fewer defects), positive slope (higher rating -> more
# defects), or no effect; plus the three defect-removal activities.
DefectIntroReqsNeg = ['pmat','prec','resl','team',
                      'acap','aexp','docu','ltex',
                      'pcon','plex','rely','sced',
                      'site','tool']
DefectIntroDesignNeg = ['pmat','prec','resl','team',
                        'acap','aexp','docu','ltex',
                        'pcon','plex','rely','sced',
                        'site','tool','pcap']
DefectIntroCodingNeg = ['pmat','prec','resl','team',
                        'acap','aexp','docu','ltex',
                        'pcon','plex','rely','sced',
                        'site','tool','pcap']
DefectIntroReqsPos = ['cplx','Data','pvol',
                      'ruse','stor','time']
DefectIntroDesignPos = ['cplx','Data','pvol',
                        'ruse','stor','time']
DefectIntroCodingPos = ['cplx','Data','pvol',
                        'ruse','stor','time']
DefectIntroReqsIgnore = ["flex","pcap"]
DefectIntroDesignIgnore = ["flex"]
DefectIntroCodingIgnore = ["flex"]
DefectRemovers = ["aa","etat","pr"]
class Cocomo(Model):
    """COCOMO-II effort/schedule model with CoQualMo defects and SCED risk.

    Attribute ranges are sampled per trial.  Constraints may come from a
    constraint file (first string positional argument) and/or keyword
    arguments; kwargs override file values on conflicts.
    """
    def __init__(o, *args, **kwargs):
        o.bounds = {}
        _s = "flex pmat prec resl team"
        o.scaleFactors = _s.split()
        _s = ("acap aexp cplx Data docu ltex "
              "pcap pcon plex pvol rely ruse "
              "sced site stor time tool")
        o.effortMultipliers = _s.split()
        _s = "aa etat pr"
        o.defectRemovers = _s.split()
        # Open file of constraints (?):
        for _a in args:
            if isinstance(_a, basestring):
                try:
                    _acc = {}
                    with open(_a,'r') as infile:
                        for line in infile:
                            line = line.split()
                            if line[0] == '@project':
                                o.proj = line[1]
                            elif line[0] == '@discrete':
                                # "?name lo ... hi" means a float range;
                                # otherwise a single fixed integer value.
                                if line[1][0] == '?':
                                    _attr = line[1][1:]
                                    _lo = float(line[2])
                                    _hi = float(line[len(line)-1])
                                else:
                                    _attr = line[1]
                                    _lo = _hi = int(line[2])
                                _acc[_attr] = _lo,_hi
                    # Overwrite file constraints w/kwargs:
                    kwargs = dict(_acc.items() + kwargs.items())
                    break
                except:
                    print "Input file [", _a, "] not readable"
        # Read constraints from kwargs:
        for _key,_val in kwargs.items():
            # print _key, _val
            if ((_key in o.scaleFactors) or
                (_key in o.effortMultipliers) or
                (_key in o.defectRemovers) or
                (_key in ["kloc","b"])):
                # A tuple is a (lo, hi) range; a scalar pins both ends.
                if isinstance(_val, tuple):
                    _lo,_hi = _val
                else:
                    _lo = _hi = _val
                o.bounds[str(_key)] = _lo,_hi
        # Parent init:
        super(o.__class__,o).__init__()
    def say(o,x,a,b,kloc,sum,prod,exp,
            effort, # o1\o2,o3,o4
            months="nc",defects="nc",risks="nc"):
        # Verbose dump of one trial: every sampled setting plus derived
        # quantities ("nc" = not computed).
        for i,j in x.items():
            if i=="kloc": print i,j
            else: print i,j,str(o.all[i].y(j))
        print (":a",a,":b",b,":kloc",kloc,":exp",exp,
               ":sum",sum,":prod",prod, "effort",effort,
               "months",months,"defects",defects,
               "risks",risks)
    def sumSfs(o,x,out=0,reset=False):
        # Sum of scale-factor contributions for the sampled settings x.
        for i in o.scaleFactors:
            out += o.all[i].y(x[i],reset)
        return out
    def prodEms(o,x,out=1,reset=False):
        # Product of effort-multiplier contributions for settings x.
        for i in o.effortMultipliers:
            out *= o.all[i].y(x[i],reset) #changed_nave
        return out
    def xy(o,verbose=False):
        """One trial returning (settings, effort) only."""
        x = o.x()
        a = x["b"] # a little tricky... "a" is the x of "b"
        b = o.all["b"].y(a,reset=True)
        kloc = o.all["kloc"].x()
        sum = o.sumSfs(x,reset=True)
        prod = o.prodEms(x,reset=True)
        exp = b + 0.01 * sum
        effort = a*(kloc**exp)*prod
        if verbose: o.say(x,a,b,kloc,sum,prod,exp,effort)
        return x,effort
    def xys(o,verbose=False,olist=False):
        """One trial returning effort, months, defects and risks
        (as a list when *olist*, else prefixed by the settings dict)."""
        x = o.x()
        a = x["b"]
        b = o.all["b"].y(a,reset=True)
        kloc = x["kloc"]
        sum = o.sumSfs(x,reset=True)
        prod = o.prodEms(x,reset=True)
        exp = b + 0.01 * sum
        effort = o.effort_calc(x, a, b, exp, sum, prod)
        months = o.month_calc(x, effort, sum, prod)
        defects = o.defect_calc(x)
        risks = o.risk_calc(x)
        if verbose: o.say(x,a,b,kloc,sum,prod,exp,
                          effort,months,defects,risks)
        if olist:
            return [effort,months,defects,risks]
        else:
            return x,effort,months,defects,risks
    def trials(o,n=500,out="out.csv",verbose=True,write=False):
        """Run *n* trials; optionally write a CSV and print summary stats.

        Returns (column keys, rows of sampled values + outputs).
        """
        import csv
        keys = []
        _efforts = []
        _months = []
        _defects = []
        _risks = []
        _first = 0
        rows = []
        with open(out,'w') as csv_file:
            if write: csv_wri = csv.writer(csv_file)
            for _i in range(0,n):
                x = o.x()
                if _i == 0:
                    # Build the header row from the first trial's keys.
                    for _k,_ in x.iteritems():
                        if _first == 0:
                            keys.append('$'+str(_k))
                            _first = 1
                        else:
                            keys.append('$'+str(_k)) #changed_nave
                    keys.extend(["-effort","-months",
                                 "-defects","-risks"])
                    if write: csv_wri.writerows([keys])
                a = x["b"]
                b = o.all["b"].y(a,reset=True)
                kloc = x["kloc"]
                sum = o.sumSfs(x,reset=True)
                prod = o.prodEms(x,reset=True)
                exp = b + 0.01 * sum
                effort = o.effort_calc(x,a,b,exp,sum,prod)
                months = o.month_calc(x,effort,sum,prod)
                defects = o.defect_calc(x)
                risks = o.risk_calc(x)
                _efforts.append(effort)
                _months.append(months)
                _defects.append(defects)
                _risks.append(risks)
                vals = []
                for _,_v in x.iteritems():
                    vals.append(_v)
                vals.extend([effort,months,defects,risks])
                if write: csv_wri.writerows([vals])
                rows.append(vals)
        if verbose:
            # Means, standard deviations and quartiles over all trials.
            _effSum = math.fsum(_efforts)
            _mosSum = math.fsum(_months)
            _defSum = math.fsum(_defects)
            _rskSum = math.fsum(_risks)
            _effMean = _effSum/n
            _mosMean = _mosSum/n
            _defMean = _defSum/n
            _rskMean = _rskSum/n
            _effSD = pow( math.fsum(map(lambda x: pow(x-_effMean,2),_efforts))/n, 0.5)
            _mosSD = pow( math.fsum(map(lambda x: pow(x-_mosMean,2),_months))/n, 0.5)
            _defSD = pow( math.fsum(map(lambda x: pow(x-_defMean,2),_defects))/n, 0.5)
            _rskSD = pow( math.fsum(map(lambda x: pow(x-_rskMean,2),_risks))/n, 0.5)
            _efforts.sort()
            _months.sort()
            _defects.sort()
            _risks.sort()
            print "Means:"
            print "\tEff:",_effMean,"\n\tMos:",_mosMean,"\n\tDef:",_defMean,"\n\tRsk:",_rskMean
            print ""
            print "Standard Deviations:"
            print "\tEff:",_effSD,"\n\tMos:",_mosSD,"\n\tDef:",_defSD,"\n\tRsk:",_rskSD
            print ""
            print "Quartile Bounds (25/50/75):"
            print "\tEff:", _efforts[int(.25*n)],"\t",\
                  _efforts[int(.5*n)],"\t",\
                  _efforts[int(.75*n)], \
                  "\n\tMos:", _months[int(.25*n)],"\t",\
                  _months[int(.5*n)],"\t",\
                  _months[int(.75*n)], \
                  "\n\tDef:", _defects[int(.25*n)],"\t",\
                  _defects[int(.5*n)] ,"\t",\
                  _defects[int(.75*n)], \
                  "\n\tRsk:", _risks[int(.25*n)],"\t",\
                  _risks[int(.5*n)],"\t",\
                  _risks[int(.75*n)]
        return keys,rows
    def about(o):
        """Build the attribute objects, honoring any o.bounds overrides."""
        def dr(what, lo=1,hi=6) : return Dr(what,lo,hi)
        def sf(what, lo=1,hi=5) : return Sf(what,lo,hi)
        def emn(what,lo=1,hi=5) : return Emn(what,lo,hi)
        def emp(what,lo=1,hi=5) : return Emp(what,lo,hi)
        _rtn = []
        # kloc:
        if "kloc" in o.bounds:
            _lo,_hi = o.bounds["kloc"]
        else: _lo,_hi = 2,1000
        _rtn.append( Range("kloc",_lo,_hi) )
        # b (becomes 'a')
        if "b" in o.bounds:
            _lo,_hi = o.bounds["b"]
            _rtn.append( B("b",_lo,_hi) )
        else:
            _lo,_hi = 3,10
            _rtn.append( B("b",3,10,wild=True) )
        # Defect Removers:
        for _dr in ["aa",
                    "etat",
                    "pr" ]:
            if _dr in o.bounds:
                _lo,_hi = o.bounds[_dr]
                _rtn.append( dr(_dr,_lo,_hi) )
            else:
                _rtn.append( dr(_dr) )
        # Scale Factors:
        for _sf in ["prec", "flex",
                    "resl", "team",
                    "pmat" ]:
            if _sf in o.bounds:
                _lo,_hi = o.bounds[_sf]
                _rtn.append( sf(_sf,_lo,_hi) )
            else:
                _rtn.append( sf(_sf) )
        # Effort Multipliers, Positive Slope
        for _emp, _rng in [ ( "rely", (1,5) ),
                            ( "Data", (2,5) ),
                            ( "cplx", (1,6) ),
                            ( "ruse", (2,6) ),
                            ( "docu", (1,5) ),
                            ( "time", (3,6) ),
                            ( "stor", (3,6) ),
                            ( "pvol", (2,5) )]:
            if _emp in o.bounds:
                _lo,_hi = o.bounds[_emp]
            else:
                _lo,_hi = _rng
            _rtn.append( emp(_emp,_lo,_hi) )
        # Effort Multipliers, Negative Slope
        for _emn in ["acap", "pcap",
                     "pcon", "aexp",
                     "plex", "ltex",
                     "tool", "site",
                     "sced" ]:
            if _emn in o.bounds:
                _lo,_hi = o.bounds[_emn]
            else:
                if _emn == "site":
                    _hi = 6 # Special case
                else:
                    _hi = 5 # (Default)
                _lo = 1
            _rtn.append( emn(_emn,_lo,_hi) )
        return _rtn
    def effort_calc(o, x,
                    a=-1, b=-1, exp=-1,
                    sum=-1, prod=-1):
        # effort = a * kloc**exp * prod; -1 means "compute it here".
        if a == -1: a = x["b"]
        if b == -1: b = o.all["b"].y(a)
        if sum == -1: sum = o.sumSfs(x)
        if exp == -1: exp = b + 0.01 * sum
        if prod == -1: prod = o.prodEms(x)
        return a*x["kloc"]**exp*prod
    def month_calc(o, x, effort,
                   sum=-1, prod=-1):
        """COCOMO months: scaled nominal schedule times the SCED stretch."""
        if sum == -1: sum = o.sumSfs(x)
        if prod == -1: prod = o.prodEms(x)
        _c = 3.67
        _d = 0.28
        _sced = int(x["sced"])
        _scedPercent = 0
        # SCED rating -> schedule compression/stretch percentage.
        if (_sced == 1):
            _scedPercent = 75
        elif (_sced == 2):
            _scedPercent = 85
        elif (_sced == 3):
            _scedPercent = 100
        elif (_sced == 4):
            _scedPercent = 130
        elif (_sced == 5):
            _scedPercent = 160
        # Nominal-schedule effort: back out the sced multiplier.
        _pmNs = (effort /
                 float(o.all["sced"].y(x["sced"])))
        _elessb = 0.01 * sum
        _f = _d + (0.2 * _elessb)
        return _c * pow(_pmNs,_f) * (_scedPercent/100.0)
    def defect_calc(o, x):
        """Total delivered defects across the three phases."""
        return (o.defects("requirements", x) +
                o.defects("design", x) +
                o.defects("code", x))
    def defects(o, dtype, x):
        """Defects of one phase: introduced (per-KSLOC base rate times the
        CoQualMo multipliers) scaled by the removal ratio."""
        _ksloc = float(x["kloc"])
        _introduced = 0
        import time
        # NOTE(review): deliberate 10ms delay on every call; looks like
        # throttling/instrumentation -- confirm before removing.
        time.sleep(0.01)
        if (dtype == "requirements"):
            _introduced = (10 * _ksloc *
                           o.defectsIntroduced(dtype,x))
        elif (dtype == "design"):
            _introduced = (20 * _ksloc *
                           o.defectsIntroduced(dtype,x))
        elif (dtype == "code"):
            _introduced = (30 * _ksloc *
                           o.defectsIntroduced(dtype,x))
        _percentRemoved = o.defectsRemovedRatio(dtype,x)
        return _introduced * _percentRemoved
    def defectsRemovedRatio(o, dtype,x):
        # Product over removal activities of (1 - removal fraction).
        _product = 1
        for _key in o.defectRemovers:
            if _key in x:
                if (dtype == "requirements"):
                    _product *= (1 - float(
                        o.all[
                            _key
                        ].calibs.defectRemovalReqs.y(x[_key])
                    ))
                elif (dtype == "design"):
                    _product *= (1 - float(
                        o.all[
                            _key
                        ].calibs.defectRemovalDesign.y(x[_key])
                    ))
                elif (dtype == "code"):
                    _product *= (1 - float(
                        o.all[
                            _key
                        ].calibs.defectRemovalCoding.y(x[_key])
                    ))
        return _product
    def totalDefectsIntroduced(o,x):
        # Phase base rates (10/20/30 per KSLOC) times the CoQualMo factors.
        _ksloc = x["kloc"]
        return (10 * _ksloc *
                o.defectsIntroduced(
                    "requirements", x) +
                20 * _ksloc *
                o.defectsIntroduced(
                    "design", x) +
                30 * _ksloc *
                o.defectsIntroduced(
                    "code", x))
    def defectsIntroduced(o, dtype, x):
        """Product of per-attribute defect-introduction multipliers for
        one phase, over scale factors and effort multipliers."""
        _product = 1
        for _key in o.scaleFactors:
            if _key in x:
                if (dtype == "requirements"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroReqs.y(x[_key])
                    )
                elif (dtype == "design"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroDesign.y(x[_key])
                    )
                elif (dtype == "code"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroCoding.y(x[_key])
                    )
            else:
                print ("Err: " + _key +
                       " not defined in source input")
        for _key in o.effortMultipliers:
            if _key in x:
                if (dtype == "requirements"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroReqs.y(x[_key])
                    )
                elif (dtype == "design"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroDesign.y(x[_key])
                    )
                elif (dtype == "code"):
                    _product *= float(
                        o.all[
                            _key
                        ].calibs.defectIntroCoding.y(x[_key])
                    )
            else:
                print ("Err: " + _key +
                       " not defined in source input")
        return _product
    def risk_calc(o, x):
        # Build the risk tables fresh and score the sampled project.
        rt = {}
        readRisks(rt)
        return totalRisk(x, rt)
class Calibrations():
    """CoQualMo calibration settings for
    a given CoCoMo attribute"""
    def __init__(o,txt):
        def intro(category, pos, neg, ignore):
            # One defect-introduction Calib per phase: slope +1/-1/0
            # depending on which list names this attribute, else None.
            # (Factored out of three identical if/elif ladders.)
            if txt in pos:
                return Calib('Intro', category, 1)
            if txt in neg:
                return Calib('Intro', category, -1)
            if txt in ignore:
                return Calib('Intro', category, 0)
            return None
        # Construction order matters: each Calib may draw from the RNG.
        o.defectIntroReqs = intro('Reqs', DefectIntroReqsPos,
                                  DefectIntroReqsNeg,
                                  DefectIntroReqsIgnore)
        o.defectIntroDesign = intro('Design', DefectIntroDesignPos,
                                    DefectIntroDesignNeg,
                                    DefectIntroDesignIgnore)
        o.defectIntroCoding = intro('Coding', DefectIntroCodingPos,
                                    DefectIntroCodingNeg,
                                    DefectIntroCodingIgnore)
        # Removal slopes exist only for the defect-removal activities.
        if txt in DefectRemovers:
            o.defectRemovalReqs = Calib('Removal', 'Reqs', 0)
            o.defectRemovalDesign = Calib('Removal', 'Design', 0)
            o.defectRemovalCoding = Calib('Removal', 'Coding', 0)
class Calib():
    """CoQualMo calibration Data generator"""

    # (phase, category, slope-bucket) -> uniform sampling range for mv.
    _RANGES = {
        ('Intro', 'Reqs', 1): (0.0166, .38),
        ('Intro', 'Reqs', -1): (-0.215, -0.035),
        ('Intro', 'Design', 1): (0.0066, 0.145),
        ('Intro', 'Design', -1): (-0.325, -0.05),
        ('Intro', 'Coding', 1): (0.0066, 0.145),
        ('Intro', 'Coding', -1): (-0.29, -0.05),
        ('Removal', 'Reqs', 0): (0.0, 0.14),
        ('Removal', 'Design', 0): (0.0, 0.156),
        ('Removal', 'Coding', 0): (0.1, 0.176),
    }

    def __init__(o, phase, category, sign):
        o.phase = phase        # 'Intro' or 'Removal'
        o.category = category  # 'Reqs' / 'Design' / 'Coding'
        o.sign = sign          # slope direction: +1, -1 or 0
        o.mv = 0
        o.mv = o.m(reset=True)   # draw the initial slope

    def y(o, x, reset=False):
        """Linear calibration curve at rating *x*."""
        if o.phase == 'Intro':
            return o.m(reset) * (x - 3) + 1
        elif o.phase == 'Removal':
            return o.m(reset) * (x - 1)

    def m(o, reset=False):
        """Return the slope, optionally redrawing it from its range."""
        if reset:
            if o.phase == 'Removal':
                bucket = 0            # Removal ignores the sign
            elif o.sign > 0:
                bucket = 1
            elif o.sign < 0:
                bucket = -1
            else:
                bucket = None         # Intro with zero slope: keep mv
            span = o._RANGES.get((o.phase, o.category, bucket))
            if span:
                o.mv = random.uniform(*span)
        return o.mv
class Sf(Range):
    """Scale Factor"""

    _LO, _HI = -0.972, -0.648   # slope sampling range

    def __init__(o, *args, **kwargs):
        super(o.__class__, o).__init__(*args, **kwargs)
        o.mv = random.uniform(o._LO, o._HI)
        o.calibs = Calibrations(o.txt)

    def y(o, x, reset=False):
        """Effort contribution: negative slope anchored at rating 6."""
        return o.m(reset) * (x - 6)

    def m(o, reset=False):
        # Optionally redraw the slope, then report it.
        if reset:
            o.mv = random.uniform(o._LO, o._HI)
        return o.mv
class Dr(Range):
    """Defect Remover"""

    def __init__(o, *args, **kwargs):
        super(o.__class__, o).__init__(*args, **kwargs)
        o.calibs = Calibrations(o.txt)

    def y(o, x, reset=False):
        # Defect removers contribute through o.calibs, not directly.
        return None
class Em(Range):
    """Effort Multiplier"""

    def y(o, x, reset=False):
        """Linear multiplier anchored at the nominal rating 3."""
        slope = o.m(reset)
        return slope * (x - 3) + 1
class Emp(Em):
    """Effort Multiplier, Positive slope"""

    _LO, _HI = 0.055, 0.15   # slope sampling range

    def __init__(o, *args, **kwargs):
        super(o.__class__, o).__init__(*args, **kwargs)
        o.mv = random.uniform(o._LO, o._HI)
        o.calibs = Calibrations(o.txt)

    def m(o, reset=False):
        # Optionally redraw the slope, then report it.
        if reset:
            o.mv = random.uniform(o._LO, o._HI)
        return o.mv
class Emn(Em):
    """Effort Multiplier, Negative slope"""

    _LO, _HI = -0.166, -0.075   # slope sampling range

    def __init__(o, *args, **kwargs):
        super(o.__class__, o).__init__(*args, **kwargs)
        o.mv = random.uniform(o._LO, o._HI)
        o.calibs = Calibrations(o.txt)

    def m(o, reset=False):
        # Optionally redraw the slope, then report it.
        if reset:
            o.mv = random.uniform(o._LO, o._HI)
        return o.mv
class B(Range):
    """The 'b' attribute: y() maps a sampled 'a' to the COCOMO exponent."""

    def __init__(o, *args, **kwargs):
        super(o.__class__, o).__init__(*args, **kwargs)
        o.rval = random.random()

    def y(o, x, reset=False):
        if reset:
            o.rval = random.random()
        noise = 0.1 * o.rval
        return -0.036 * x + 1.1 - noise - 0.05
# import os
# import sys
# def coced0(output=os.environ["HOME"]+"/tmp",
# Data = "./Data",
# model=None):
# if not model:
# if len(sys.argv) > 1:
# model = sys.argv[1]
# else:
# model="flight"
# _c = Cocomo(Data + '/' + model)
# _c.xys(verbose=False)
# out = output + "/" + model + ".csv"
# _c.trials(out=out,verbose=False)
# sys.stderr.write("# see" + out + "\n")
#coced0()
def coced1(max=1000):
    """Sample *max* projects and plot their sorted efforts (log scale),
    plus a doubled curve for comparison."""
    import matplotlib.pyplot as plt
    random.seed(1)
    c = Cocomo()
    n = 0
    out= sorted([c.xy() for x in range(max)],
                key=lambda x: x[1])
    xs=[]
    ys=[]
    for x,y in out:
        n += 1
        xs.append(n)
        ys.append(y)
    p1, = plt.plot(xs,ys,'ro')
    p2, = plt.plot(xs,[x*2 for x in ys],'bo')
    plt.legend([p2,p1],["small","bigger"],loc=4)
    plt.xlim(0,1050)
    plt.yscale('log')
    plt.ylabel('effort')
    plt.xlabel('all efforts, sorted')
    plt.show()
#plt.savefig('coced1.png')
#coced1()
def coced1b(max=1000):
    """Variant of coced1 collecting three output series.

    NOTE(review): Cocomo.xy() returns (x, effort) 2-tuples, so the
    four-way unpack below raises ValueError -- this probably meant to
    call c.xys() (and unpack five values).  Nothing is plotted either.
    """
    import matplotlib.pyplot as plt
    random.seed(1)
    c = Cocomo()
    n = 0
    out = sorted([c.xy() for x in range(max)],
                 key = lambda x: x[1])
    xs = []
    y1s = []
    y2s = []
    y3s = []
    for x,y1,y2,y3 in out:
        n += 1
        xs.append(n)
        y1s.append(y1)
        y2s.append(y2)
        y3s.append(y3)
def coced2(max=1000,rounds=10):
    """Kick off the coced2a elite-sampling recursion for *rounds* rounds."""
    #random.seed(1)
    model = Cocomo()
    coced2a(rounds, model, max)
def coced2a(r, c, max, updates={}):
    """One elite-sampling round: install *updates*, sample *max* trials,
    histogram the efforts, keep the elite and recurse for r-1 rounds."""
    def h100(x, r=250):
        # Round x down to the nearest multiple of r.
        return int(x/r) * r
    if r <= 0:
        return
    for name in updates:
        c.all[name].sample = updates[name]
    out = [c.xy() for _ in range(max)]
    efforts = Rsteps("effort[%s]" % r, final=h100)
    for _, effort in out:
        efforts.all.append(effort)
    somed0(efforts, n=max)
    coced2a(r - 1, c, max, elite(out))
def coced3(max=1000,rounds=20):
    """Plot the sorted-effort curve of each elite-resampling round
    (one line style per round)."""
    random.seed(1)
    c = Cocomo()
    import matplotlib.pyplot as plt
    #plt.yscale('log')
    plt.ylabel('effort')
    plt.xlabel('all efforts, sorted')
    styles=["r-","m-","c-","y-","k-","b-","g-"]
    plots=[]
    legends=[]
    coced3a(0,len(styles)-1,c,max,plt,styles,plots=plots,legends=legends)
    plt.legend(plots,legends,loc=2)
    plt.xlim(0,1050)
    plt.show()
def coced3a(round,rounds,c,max,plt,styles,updates={},plots=[],legends=[]):
    """One plotting round: install *updates*, sample, plot, recurse with
    the elite.  coced3 always passes fresh plots/legends lists.

    NOTE(review): the x-axis below is hardcoded to range(1000); this
    breaks whenever max != 1000.
    """
    def h100(x,r=250) : return int(x/r) * r   # unused helper, kept as-is
    if round <= rounds:
        for k in updates:
            c.all[k].sample = updates[k]
        out = [c.xy() for x in range(max)]
        better = elite(out)
        plot = plt.plot([x for x in range(1000)],
                        sorted([effort for _,effort in out]),
                        styles[round],linewidth=round+1)
        plots.append(plot)
        legends.append("round%s" % round)
        coced3a(round+1,rounds,c,max,plt,styles,updates=better,
                plots=plots,legends=legends)
def coced4(samples=1000,rounds=15):
    """Track the median and inter-quartile spread of effort across
    elite-resampling rounds, then plot both against round number."""
    #random.seed(1)
    c = Cocomo()
    import matplotlib.pyplot as plt
    #plt.yscale('log')
    xs = []
    medians=[]
    spreads=[]
    mosts=[]
    coced4a(0,rounds,c,samples,{},xs,medians,spreads,mosts)
    plt.ylabel('effort')
    plt.xlabel('round')
    plt.legend([plt.plot(xs,medians),plt.plot(xs,spreads)],
               ["median","spread"],
               loc=1)
    plt.xlim(-0.5,len(medians)+0.5)
    plt.ylim(0,1.05*max(medians + spreads + mosts))
    plt.show()
def coced4a(round,rounds,c,samples,updates={},xs=[],medians=[],spreads=[],mosts=[]):
    """One statistics round: install non-wild *updates*, sample, record the
    median and 25-75 spread of effort, then recurse with the elite.
    coced4 always passes fresh accumulator lists."""
    if round <= rounds:
        print round
        for k in updates:
            # "wild" attributes (e.g. b) are never narrowed by the elite.
            if not c.all[k].wild:
                c.all[k].sample = updates[k]
                somed0(c.all[k].sample,n=100)
        out = [c.xy() for x in range(samples)]
        better = elite(out)
        ys = sorted([x for _,x in out])
        p25,p50,p75= [int(len(ys)*n) for n in [0.25,0.5,0.75]]
        medians.append(ys[p50])
        spreads.append(ys[p75] - ys[p25])
        xs.append(round)
        coced4a(round+1,rounds,c,samples,updates=better,
                xs=xs,medians=medians,spreads=spreads,mosts=mosts)
def elite(xy,bins=7,top=0.2,final=float,key=lambda x:x[1]):
    """Split scored samples into the best *top* fraction vs the rest;
    return, per attribute, an Rsteps sampler fitted to the best settings.
    Also prints the best-vs-rest distance per attribute."""
    def r(x) : return "%3.2f" % x   # unused formatter, kept as-is
    def keep(lst):
        # Build one Rsteps per attribute from the settings dicts in lst.
        keeper = {}
        for how,_ in lst:
            if not keeper:
                for k in how:
                    keeper[k] = Rsteps(k,bins,final)
            for k,v in how.items():
                keeper[k].put(v)
        return keeper
    n = int(top*len(xy))
    xy = sorted(xy,key=key)
    bests = keep(xy[:n])
    rests = keep(xy[n:])
    for k,v in bests.items():
        print k, bests[k] - rests[k]
    return bests
#coced4()
# -*- coding: utf-8 -*-
#
# ConnPlotter.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
ConnPlotter is a tool to create connectivity pattern tables.
For background on ConnPlotter, please see
Eilen Nordlie and Hans Ekkehard Plesser.
Connection Pattern Tables: A new way to visualize connectivity
in neuronal network models.
Frontiers in Neuroinformatics 3:39 (2010)
doi: 10.3389/neuro.11.039.2009
Example:
# code creating population and connection lists
from ConnPlotter import ConnectionPattern, SynType
# Case A: All connections have the same "synapse_model".
#
# Connections with weight < 0 are classified as excitatory,
# weight > 0 are classified as inhibitory.
# Each sender must make either excitatory or inhibitory connection,
# not both. When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList)
# Case B: All connections have the same "synapse_model", but violate Dale's law
#
# Connections with weight < 0 are classified as excitatory,
# weight > 0 are classified as inhibitory.
# A single sender may have excitatory and inhibitory connections.
# When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList,
synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
# Case C: Synapse models are "AMPA", "NMDA", "GABA_A", "GABA_B".
#
# Connections are plotted by synapse model, with AMPA and NMDA
# on the top row, GABA_A and GABA_B in the bottom row when
# combining by layer. Senders must either have AMPA and NMDA or
# GABA_A and GABA_B synapses, but not both. When computing totals,
# AMPA and NMDA connections are weighted with +1, GABA_A and GABA_B
# with -1.
pattern = ConnectionPattern(layerList, connList)
# Case D: Explicit synapse types.
#
# If your network model uses other synapse types, or you want to use
# other weighting factors when computing totals, or you want different
# colormaps, you must specify synapse type information explicitly for
# ALL synapse models in your network. For each synapse model, you create
# a
#
# SynType(name, tweight, cmap)
#
# object, where "name" is the synapse model name, "tweight" the weight
# to be given to the type when computing totals (usually >0 for excit,
# <0 for inhib synapses), and "cmap" the "colormap": it may be a
# matplotlib.colors.Colormap instance or any valid matplotlib color
# specification; in the latter case, a colormap will be generated
# ranging from white to the given color.
# Synapse types are passed as a tuple of tuples. Synapses in a tuple form
# a group. ConnPlotter assumes that a sender may make synapses with all
# types in a single group, but never synapses with types from different
# groups (If you group by transmitter, this simply reflects Dale's law).
# When connections are aggregated by layer, each group is printed on one
# row.
pattern = ConnectionPattern(layerList, connList, synTypes = \
((SynType('Asyn', 1.0, 'orange'),
SynType('Bsyn', 2.5, 'r'),
SynType('Csyn', 0.5, (1.0, 0.5, 0.0))), # end first group
(SynType('Dsyn', -1.5, matplotlib.pylab.cm.jet),
SynType('Esyn', -3.2, '0.95'))))
# See documentation of class ConnectionPattern for more options.
# plotting the pattern
# show connection kernels for all sender-target pairs and all synapse models
pattern.plot()
# combine synapses of all types for each sender-target pair
# always uses red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True)
# for each pair of sender-target layer pair, show sums for each synapse type
pattern.plot(aggrGroups=True)
# As mode layer, but combine synapse types.
# always uses red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True, aggrGroups=True)
# Show only synapses of the selected type(s)
pattern.plot(mode=('AMPA',))
pattern.plot(mode=('AMPA', 'GABA_A'))
# use same color scales for all patches
pattern.plot(globalColors=True)
# manually specify limits for global color scale
pattern.plot(globalColors=True, colorLimits=[0, 2.5])
# save to file(s)
# NB: do not write to PDF directly, this seems to cause artifacts
pattern.plot(file='net.png')
pattern.plot(file=('net.eps','net.png'))
# You can adjust some properties of the figure by changing the
# default values in plotParams.
# Experimentally, you can dump the connection pattern into a LaTeX table
pattern.toLaTeX('pattern.tex', standalone=True)
# Figure layout can be modified by changing the global variable plotParams.
# Please see the documentation for class PlotParams for details.
# Changes 30 June 2010:
# - Singular layers (extent 0x0) are ignored as target layers.
# The reason for this is so that single-generator "layers" can be
# displayed as input.
# Problems:
# - singularity is not made clear visually
# - This messes up the diagonal shading
# - makes no sense to aggregate any longer
"""
# ----------------------------------------------------------------------------
from . import colormaps as cm
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import warnings
__all__ = ['ConnectionPattern', 'SynType', 'plotParams', 'PlotParams']
# ----------------------------------------------------------------------------
# To do:
# - proper testsuite
# - layers of different sizes not handled properly
# (find biggest layer extent in each direction, then center;
# may run into problems with population label placement)
# - clean up main
# - color bars
# - "bad color" should be configurable
# - fix hack for colormaps import
# - use generators where possible (eg kernels?)
# ----------------------------------------------------------------------------
class SynType(object):
    """
    Rendering information for a single synapse type.

    A singly nested list of SynType objects can be handed to the
    ConnectionPattern constructor to control layout and rendering.
    """

    def __init__(self, name, relweight, cmap):
        """
        Arguments:
        name      Unique name of the synapse type (string).
        relweight Relative weight applied when aggregating across
                  synapse types; negative for inhibitory connections.
        cmap      Either a matplotlib.colors.Colormap instance or a
                  color specification. In the latter case a colormap
                  ranging from white to the (fully saturated) color is
                  built. Colormaps should have set_bad(color='white').
        """
        self.name = name
        self.relweight = relweight
        # Accept a ready-made colormap unchanged; otherwise construct a
        # white-to-color map from the given color specification.
        self.cmap = (cmap
                     if isinstance(cmap, mpl.colors.Colormap)
                     else cm.make_colormap(cmap))
# ----------------------------------------------------------------------------
class PlotParams(object):
    """
    Collects parameters governing plotting.
    Implemented using properties so values can be validated on assignment.
    """

    class Margins(object):
        """Width of outer margins, in mm."""

        def __init__(self):
            """Set default values."""
            self._left = 15.0
            self._right = 10.0
            self._top = 10.0
            self._bottom = 10.0
            self._colbar = 10.0

        @property
        def left(self):
            """Left outer margin, in mm."""
            return self._left

        @left.setter
        def left(self, l):
            self._left = float(l)

        @property
        def right(self):
            """Right outer margin, in mm."""
            return self._right

        @right.setter
        def right(self, r):
            self._right = float(r)

        @property
        def top(self):
            """Top outer margin, in mm."""
            return self._top

        @top.setter
        def top(self, t):
            self._top = float(t)

        @property
        def bottom(self):
            """Bottom outer margin, in mm."""
            return self._bottom

        @bottom.setter
        def bottom(self, b):
            self._bottom = float(b)

        @property
        def colbar(self):
            """Vertical space reserved for the colorbar strip, in mm."""
            return self._colbar

        @colbar.setter
        def colbar(self, b):
            self._colbar = float(b)

    def __init__(self):
        """Set default values"""
        self._n_kern = 100
        self._patch_size = 20.0  # 20 mm
        self._layer_bg = {'super': '0.9', 'diag': '0.8', 'sub': '0.9'}
        self._layer_font = mpl.font_manager.FontProperties(size='large')
        self._layer_orient = {'sender': 'horizontal', 'target': 'horizontal'}
        self._pop_font = mpl.font_manager.FontProperties(size='small')
        self._pop_orient = {'sender': 'horizontal', 'target': 'horizontal'}
        self._lgd_tick_font = mpl.font_manager.FontProperties(size='x-small')
        self._lgd_title_font = mpl.font_manager.FontProperties(size='xx-small')
        self._lgd_ticks = None
        self._lgd_tick_fmt = None
        self._lgd_location = None
        self._cbwidth = None
        self._cbspace = None
        self._cbheight = None
        self._cboffset = None
        # z-order values for the three kinds of axes; exposed read-only.
        self._z_layer = 25
        self._z_pop = 50
        self._z_conn = 100
        self.margins = self.Margins()

    def reset(self):
        """
        Reset to default values.
        """
        self.__init__()

    @property
    def n_kern(self):
        """Sample long kernel dimension at N_kernel points."""
        return self._n_kern

    @n_kern.setter
    def n_kern(self, n):
        if n <= 0:
            raise ValueError('n_kern > 0 required')
        self._n_kern = n

    @property
    def patch_size(self):
        """Length of the longest edge of the largest patch, in mm."""
        return self._patch_size

    @patch_size.setter
    def patch_size(self, sz):
        if sz <= 0:
            raise ValueError('patch_size > 0 required')
        self._patch_size = sz

    @property
    def layer_bg(self):
        """
        Dictionary of colors for layer background.
        Entries "super", "diag", "sub". Each entry
        can be set to any valid color specification.
        If just a color is given, create dict by
        brightening/dimming.
        """
        return self._layer_bg

    @layer_bg.setter
    def layer_bg(self, bg):
        if isinstance(bg, dict):
            if set(bg.keys()) != set(('super', 'diag', 'sub')):
                raise ValueError(
                    'Background dict must have keys "super", "diag", "sub"')
            for bgc in bg.values():
                if not mpl.colors.is_color_like(bgc):
                    raise ValueError('Entries in background dict must be ' +
                                     'valid color specifications.')
            self._layer_bg = bg
        elif not mpl.colors.is_color_like(bg):
            raise ValueError(
                'layer_bg must be dict or valid color specification.')
        else:  # is color like
            rgb = mpl.colors.colorConverter.to_rgb(bg)
            # NOTE(review): 1.1 * c can exceed 1.0 for bright colors;
            # matplotlib rejects rgb components > 1 -- confirm intended.
            self._layer_bg = {'super': [1.1 * c for c in rgb],
                              'diag': rgb,
                              'sub': [0.9 * c for c in rgb]}

    @property
    def layer_font(self):
        """
        Font to use for layer labels.
        Can be set to a matplotlib.font_manager.FontProperties instance.
        """
        return self._layer_font

    @layer_font.setter
    def layer_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('layer_font must be a ' +
                             'matplotlib.font_manager.FontProperties instance')
        self._layer_font = font

    @property
    def layer_orientation(self):
        """
        Orientation of layer labels.
        Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
        to a single string or number, this value is used for both sender and
        target labels.
        """
        return self._layer_orient

    @layer_orientation.setter
    def layer_orientation(self, orient):
        if isinstance(orient, (str, float, int)):
            tmp = {'sender': orient, 'target': orient}
        elif isinstance(orient, dict):
            tmp = self._layer_orient
            tmp.update(orient)
        else:
            # fixed typo in error message ("ust" -> "must")
            raise ValueError(
                'Orientation must be set to dict, string or number.')
        if len(tmp) > 2:
            raise ValueError('Orientation dictionary can only contain keys ' +
                             '"sender" and "target".')
        self._layer_orient = tmp

    @property
    def pop_font(self):
        """
        Font to use for population labels.
        Can be set to a matplotlib.font_manager.FontProperties instance.
        """
        return self._pop_font

    @pop_font.setter
    def pop_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('pop_font must be a ' +
                             'matplotlib.font_manager.FontProperties instance')
        self._pop_font = font

    @property
    def pop_orientation(self):
        """
        Orientation of population labels.
        Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
        to a single string or number, this value is used for both sender and
        target labels.
        """
        return self._pop_orient

    @pop_orientation.setter
    def pop_orientation(self, orient):
        if isinstance(orient, (str, float, int)):
            tmp = {'sender': orient, 'target': orient}
        elif isinstance(orient, dict):
            tmp = self._pop_orient
            tmp.update(orient)
        else:
            # fixed typo in error message ("ust" -> "must")
            raise ValueError(
                'Orientation must be set to dict, string or number.')
        if len(tmp) > 2:
            raise ValueError('Orientation dictionary can only contain keys ' +
                             '"sender" and "target".')
        self._pop_orient = tmp

    @property
    def legend_tick_font(self):
        """
        FontProperties for legend (colorbar) ticks.
        """
        return self._lgd_tick_font

    @legend_tick_font.setter
    def legend_tick_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('legend_tick_font must be a ' +
                             'matplotlib.font_manager.FontProperties instance')
        self._lgd_tick_font = font

    @property
    def legend_title_font(self):
        """
        FontProperties for legend (colorbar) titles.
        """
        return self._lgd_title_font

    @legend_title_font.setter
    def legend_title_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('legend_title_font must be a ' +
                             'matplotlib.font_manager.FontProperties instance')
        self._lgd_title_font = font

    @property
    def legend_ticks(self):
        """
        Ordered list of values at which legend (colorbar) ticks shall be set.
        """
        return self._lgd_ticks

    @legend_ticks.setter
    def legend_ticks(self, ticks):
        self._lgd_ticks = ticks

    @property
    def legend_tick_format(self):
        """
        C-style format string for legend (colorbar) tick marks.
        """
        return self._lgd_tick_fmt

    @legend_tick_format.setter
    def legend_tick_format(self, tickfmt):
        self._lgd_tick_fmt = tickfmt

    @property
    def legend_location(self):
        """
        If set to 'top', place legend label above colorbar,
        if None, to the left.
        """
        return self._lgd_location

    @legend_location.setter
    def legend_location(self, loc):
        self._lgd_location = loc

    @property
    def cbwidth(self):
        """
        Width of single colorbar, relative to figure width.
        """
        return self._cbwidth

    @cbwidth.setter
    def cbwidth(self, cbw):
        self._cbwidth = cbw

    @property
    def cbheight(self):
        """
        Height of colorbar, relative to margins.colbar
        """
        return self._cbheight

    @cbheight.setter
    def cbheight(self, cbh):
        self._cbheight = cbh

    @property
    def cbspace(self):
        """
        Spacing between colorbars, relative to figure width.
        """
        return self._cbspace

    @cbspace.setter
    def cbspace(self, cbs):
        self._cbspace = cbs

    @property
    def cboffset(self):
        """
        Left offset of colorbar, relative to figure width.
        """
        return self._cboffset

    @cboffset.setter
    def cboffset(self, cbo):
        self._cboffset = cbo

    @property
    def z_layer(self):
        """Z-value for layer label axes."""
        return self._z_layer

    @property
    def z_pop(self):
        """Z-value for population label axes."""
        return self._z_pop

    @property
    def z_conn(self):
        """Z-value for connection kernel axes."""
        return self._z_conn
# ----------------------------------------------------------------------------
# Module-level singleton holding the plotting settings, default values.
# Users customize figure layout by mutating its attributes
# (e.g. plotParams.n_kern = 50) before calling ConnectionPattern.plot().
plotParams = PlotParams()
# ----------------------------------------------------------------------------
class ConnectionPattern(object):
"""
Connection pattern representation for plotting.
When a ConnectionPattern is instantiated, all connection kernels
are pre-computed. They can later be plotted in various forms by
calling the plot() method.
The constructor requires layer and connection lists:
ConnectionPattern(layerList, connList, synTypes, **kwargs)
The layerList is used to:
- determine the size of patches
- determine the block structure
All other information is taken from the connList. Information
about synapses is inferred from the connList.
The following keyword arguments can also be given:
poporder : Population order. A dictionary mapping population names
to numbers; populations will be sorted in diagram in order
of increasing numbers. Otherwise, they are sorted
alphabetically.
intensity: 'wp' - use weight * probability (default)
'p' - use probability alone
'tcd' - use total charge deposited * probability
requires mList and Vmem; per v 0.7 only supported
for ht_neuron.
mList : model list; required for 'tcd'
Vmem : membrane potential; required for 'tcd'
"""
# ------------------------------------------------------------------------
class _LayerProps(object):
"""
Information about layer.
"""
def __init__(self, name, extent):
"""
name : name of layer
extent: spatial extent of the layer
"""
self.name = name
self.ext = extent
self.singular = extent[0] == 0.0 and extent[1] == 0.0
# ------------------------------------------------------------------------
class _SynProps(object):
"""
Information on how to plot patches for a synapse type.
"""
def __init__(self, row, col, tweight, cmap, idx):
"""
row, col: Position of synapse in grid of synapse patches, from 0,0
tweight : weight used when adding kernels for different synapses
cmap : colormap for synapse type (matplotlib.colors.Colormap)
idx : linear index, used to order colorbars in figure
"""
self.r, self.c = row, col
self.tw = tweight
self.cmap = cmap
self.index = idx
# --------------------------------------------------------------------
class _PlotKern(object):
"""
Representing object ready for plotting.
"""
def __init__(self, sl, sn, tl, tn, syn, kern):
"""
sl : sender layer
sn : sender neuron/population
tl : target layer
tn : target neuron/population
syn : synapse model
kern: kernel values (numpy masked array)
All arguments but kern are strings.
"""
self.sl = sl
self.sn = sn
self.tl = tl
self.tn = tn
self.syn = syn
self.kern = kern
# ------------------------------------------------------------------------
class _Connection(object):
    """
    Internal representation of one connection (sender layer -> target
    layer), with lazily evaluated kernel values for plotting.
    """

    def __init__(self, conninfo, layers, synapses, intensity, tcd, Vmem):
        """
        Arguments:
        conninfo: list of connection info entries:
                  (sender,target,conn_dict)
        layers  : list of _LayerProps objects
        synapses: list of _SynProps objects
        intensity: 'wp', 'p', 'tcd'
        tcd     : tcd object
        Vmem    : reference membrane potential for tcd calculations
        """
        self._intensity = intensity
        # get source and target layer
        self.slayer, self.tlayer = conninfo[:2]
        lnames = [l.name for l in layers]
        if self.slayer not in lnames:
            raise Exception('Unknown source layer "%s".' % self.slayer)
        if self.tlayer not in lnames:
            raise Exception('Unknown target layer "%s".' % self.tlayer)
        # if target layer is singular (extent==(0,0)),
        # we do not create a full object
        self.singular = False
        for l in layers:
            if l.name == self.tlayer and l.singular:
                self.singular = True
                # early return: the remaining attributes are never used
                # for singular targets (see keyval below)
                return
        # see if we connect to/from specific neuron types
        cdict = conninfo[2]
        if 'sources' in cdict:
            if tuple(cdict['sources'].keys()) == ('model',):
                self.snrn = cdict['sources']['model']
            else:
                raise ValueError(
                    'Can only handle sources in form {"model": ...}')
        else:
            self.snrn = None
        if 'targets' in cdict:
            if tuple(cdict['targets'].keys()) == ('model',):
                self.tnrn = cdict['targets']['model']
            else:
                raise ValueError(
                    'Can only handle targets in form {"model": ...}')
        else:
            self.tnrn = None
        # now get (mean) weight, we need this if we classify
        # connections by sign of weight only
        try:
            self._mean_wght = _weighteval(cdict['weights'])
        except:
            # NOTE(review): bare except also masks unrelated errors from
            # _weighteval, not just a missing 'weights' key
            raise ValueError('No or corrupt weight information.')
        # synapse model
        if sorted(synapses.keys()) == ['exc', 'inh']:
            # implicit synapse type, we ignore value of
            # 'synapse_model', it is for use by NEST only
            if self._mean_wght >= 0:
                self.synmodel = 'exc'
            else:
                self.synmodel = 'inh'
        else:
            try:
                self.synmodel = cdict['synapse_model']
                if self.synmodel not in synapses:
                    raise Exception('Unknown synapse model "%s".'
                                    % self.synmodel)
            except:
                # NOTE(review): this bare except also swallows the
                # 'Unknown synapse model' exception raised just above and
                # replaces it with a misleading message -- consider
                # narrowing to KeyError
                raise Exception('Explicit synapse model info required.')
        # store information about connection
        try:
            self._mask = cdict['mask']
            self._kern = cdict['kernel']
            self._wght = cdict['weights']
            # next line presumes only one layer name will match
            self._textent = [tl.ext for tl in layers
                             if tl.name == self.tlayer][0]
            if intensity == 'tcd':
                self._tcd = tcd(self.synmodel, self.tnrn, Vmem)
            else:
                self._tcd = None
        except:
            raise Exception('Corrupt connection dictionary')
        # prepare for lazy evaluation
        self._kernel = None

    # --------------------------------------------------------------------
    @property
    def keyval(self):
        """
        Return key and _Connection as tuple.
        Useful to create dictionary via list comprehension.
        """
        if self.singular:
            return (None, self)
        else:
            return ((self.slayer, self.snrn, self.tlayer,
                     self.tnrn, self.synmodel),
                    self)

    # --------------------------------------------------------------------
    @property
    def kernval(self):
        """Kernel value, as masked array (computed lazily, then cached)."""
        if self._kernel is None:
            self._kernel = _evalkernel(self._mask, self._kern,
                                       self._mean_wght,
                                       self._textent, self._intensity,
                                       self._tcd)
        return self._kernel

    # --------------------------------------------------------------------
    @property
    def mask(self):
        """Dictionary describing the mask."""
        return self._mask

    # --------------------------------------------------------------------
    @property
    def kernel(self):
        """Dictionary describing the kernel."""
        return self._kern

    # --------------------------------------------------------------------
    @property
    def weight(self):
        """Dictionary describing weight distribution."""
        return self._wght

    # --------------------------------------------------------------------
    def matches(self, sl=None, sn=None, tl=None, tn=None, syn=None):
        """
        Return True if all non-None arguments match.
        Arguments:
        sl : sender layer
        sn : sender neuron type
        tl : target layer
        tn : target neuron type
        syn: synapse type
        """
        return ((sl is None or sl == self.slayer) and
                (sn is None or sn == self.snrn) and
                (tl is None or tl == self.tlayer) and
                (tn is None or tn == self.tnrn) and
                (syn is None or syn == self.synmodel))
# ------------------------------------------------------------------------
class _Patch(object):
    """
    Represents a patch, i.e., an axes that will actually contain an
    imshow graphic of a connection kernel.
    The patch object contains the physical coordinates of the patch,
    as well as a reference to the actual Axes object once it is created.
    Also contains strings to be used as sender/target labels.
    Everything is based on a coordinate system looking from the top left
    corner down.
    """

    # --------------------------------------------------------------------
    def __init__(self, left, top, row, col, width, height,
                 slabel=None, tlabel=None, parent=None):
        """
        Arguments:
        left, top     : Location of top-left corner
        row, col      : row, column location in parent block
        width, height : Width and height of patch
        slabel, tlabel: Values for sender/target label
        parent        : _Block to which _Patch/_Block belongs
        """
        self.l, self.t, self.r, self.c = left, top, row, col
        self.w, self.h = width, height
        self.slbl, self.tlbl = slabel, tlabel
        # matplotlib Axes instance; filled in later when figure is built
        self.ax = None
        self._parent = parent

    # --------------------------------------------------------------------
    def _update_size(self, new_lr):
        """Update patch size by inspecting all children."""
        # new_lr is a lower-right corner that must be enclosed; growth
        # propagates up the parent chain so enclosing blocks expand too.
        if new_lr[0] < self.l:
            raise ValueError(
                "new_lr[0] = %f < l = %f" % (new_lr[0], self.l))
        if new_lr[1] < self.t:
            raise ValueError(
                "new_lr[1] = %f < t = %f" % (new_lr[1], self.t))
        self.w, self.h = new_lr[0] - self.l, new_lr[1] - self.t
        if self._parent:
            self._parent._update_size(new_lr)

    # --------------------------------------------------------------------
    @property
    def tl(self):
        """Top left corner of the patch."""
        return (self.l, self.t)

    # --------------------------------------------------------------------
    @property
    def lr(self):
        """Lower right corner of the patch."""
        return (self.l + self.w, self.t + self.h)

    # --------------------------------------------------------------------
    @property
    def l_patches(self):
        """Left edge of leftmost _Patch in _Block."""
        # recursion bottoms out at plain _Patch instances, which report
        # their own edge
        if isinstance(self, ConnectionPattern._Block):
            return min([e.l_patches for e in _flattened(self.elements)])
        else:
            return self.l

    # --------------------------------------------------------------------
    @property
    def t_patches(self):
        """Top edge of topmost _Patch in _Block."""
        if isinstance(self, ConnectionPattern._Block):
            return min([e.t_patches for e in _flattened(self.elements)])
        else:
            return self.t

    # --------------------------------------------------------------------
    @property
    def r_patches(self):
        """Right edge of rightmost _Patch in _Block."""
        if isinstance(self, ConnectionPattern._Block):
            return max([e.r_patches for e in _flattened(self.elements)])
        else:
            return self.l + self.w

    # --------------------------------------------------------------------
    @property
    def b_patches(self):
        """Bottom edge of lowest _Patch in _Block."""
        if isinstance(self, ConnectionPattern._Block):
            return max([e.b_patches for e in _flattened(self.elements)])
        else:
            return self.t + self.h

    # --------------------------------------------------------------------
    @property
    def location(self):
        """'super', 'diag' or 'sub', relative to the block diagonal."""
        if self.r < self.c:
            return 'super'
        elif self.r == self.c:
            return 'diag'
        else:
            return 'sub'
# ------------------------------------------------------------------------
class _Block(_Patch):
    """
    Represents a block of patches.
    A block is initialized with its top left corner and is then built
    row-wise downward and column-wise to the right. Rows are added by
    block.newRow(2.0, 1.5)
    where 2.0 is the space between rows, 1.5 the space between the
    first row. Elements are added to a row by
    el = block.newElement(1.0, 0.6, 's', 't')
    el = block.newElement(1.0, 0.6, 's', 't', size=[2.0, 3.0])
    The first example adds a new _Block to the row. 1.0 is space between
    blocks, 0.6 space before the first block in a row. 's' and 't' are
    stored as slbl and tlbl (optional). If size is given, a _Patch with
    the given size is created. _Patch is atomic. newElement() returns the
    _Block or _Patch created.
    """

    # --------------------------------------------------------------------
    def __init__(self, left, top, row, col, slabel=None, tlabel=None,
                 parent=None):
        # A block starts with zero width/height; it grows as rows and
        # elements are added (via _Patch._update_size).
        ConnectionPattern._Patch.__init__(self, left, top, row, col, 0, 0,
                                          slabel, tlabel, parent)
        self.elements = []
        self._row_top = None  # top of current row
        self._row = 0
        self._col = 0

    # --------------------------------------------------------------------
    def newRow(self, dy=0.0, dynew=0.0):
        """
        Open new row of elements.
        Arguments:
        dy   : vertical skip before new row
        dynew: vertical skip if new row is first row
        """
        if self.elements:
            # top of row is bottom of block so far + dy
            self._row_top = self.lr[1] + dy
        else:
            # place relative to top edge of parent
            self._row_top = self.tl[1] + dynew
        self._row += 1
        self._col = 0
        self.elements.append([])

    # --------------------------------------------------------------------
    def newElement(self, dx=0.0, dxnew=0.0, slabel=None, tlabel=None,
                   size=None):
        """
        Append new element to last row.
        Creates _Block instance if size is not given, otherwise _Patch.
        Arguments:
        dx    : horizontal skip before new element
        dxnew : horizontal skip if new element is first
        slabel: sender label (on y-axis)
        tlabel: target label (on x-axis)
        size  : size of _Patch to create
        Returns:
        Created _Block or _Patch.
        """
        # newRow() must have been called at least once before
        assert (self.elements)
        if self.elements[-1]:
            # left edge is right edge of block so far + dx
            col_left = self.lr[0] + dx
        else:
            # place relative to left edge of parent
            col_left = self.tl[0] + dxnew
        self._col += 1
        if size is not None:
            elem = ConnectionPattern._Patch(col_left, self._row_top,
                                            self._row, self._col,
                                            size[0], size[1], slabel,
                                            tlabel, self)
        else:
            elem = ConnectionPattern._Block(col_left, self._row_top,
                                            self._row, self._col,
                                            slabel, tlabel, self)
        self.elements[-1].append(elem)
        # grow this block (and all ancestors) to enclose the new element
        self._update_size(elem.lr)
        return elem

    # --------------------------------------------------------------------
    def addMargin(self, rmarg=0.0, bmarg=0.0):
        """Extend block by margin to right and bottom."""
        if rmarg < 0.0:
            raise ValueError('rmarg must not be negative!')
        if bmarg < 0.0:
            raise ValueError('bmarg must not be negative!')
        lr = self.lr
        self._update_size((lr[0] + rmarg, lr[1] + bmarg))
# ------------------------------------------------------------------------
def _prepareAxes(self, mode, showLegend):
    """
    Prepare information for all axes, but do not create the actual axes
    yet.
    mode: one of 'detailed', 'by layer', 'totals'
    """
    # parameters for figure, all quantities are in mm
    patchmax = plotParams.patch_size  # length of largest patch dimension
    # actual parameters scaled from default patchmax = 20mm
    lmargin = plotParams.margins.left
    tmargin = plotParams.margins.top
    rmargin = plotParams.margins.right
    bmargin = plotParams.margins.bottom
    cbmargin = plotParams.margins.colbar
    blksep = 3. / 20. * patchmax  # distance between blocks
    popsep = 2. / 20. * patchmax  # distance between populations
    synsep = 0.5 / 20. * patchmax  # distance between synapse types
    # find maximal extents of individual patches, horizontal and vertical
    maxext = max(_flattened([l.ext for l in self._layers]))
    patchscale = patchmax / float(maxext)  # determines patch size
    # obtain number of synaptic patches per population pair
    # maximum column across all synapse types, same for rows
    nsyncols = max([s.c for s in self._synAttr.values()]) + 1
    nsynrows = max([s.r for s in self._synAttr.values()]) + 1
    # dictionary mapping into patch-axes, so they can be found later
    self._patchTable = {}
    # set to store all created patches to avoid multiple
    # creation of patches at same location
    axset = set()
    # create entire setup, top-down: one row per sender layer,
    # one block per (sender layer, target layer) pair
    self._axes = self._Block(lmargin, tmargin, 1, 1)
    for sl in self._layers:
        # get sorted list of populations for sender layer
        spops = sorted([p[1] for p in self._pops if p[0] == sl.name],
                       key=lambda pn: self._poporder[pn])
        self._axes.newRow(blksep, 0.0)
        for tl in self._layers:
            # ignore singular target layers
            if tl.singular:
                continue
            # get sorted list of populations for target layer
            tpops = sorted([p[1] for p in self._pops if p[0] == tl.name],
                           key=lambda pn: self._poporder[pn])
            # compute size for patches
            patchsize = patchscale * np.array(tl.ext)
            block = self._axes.newElement(blksep, 0.0, sl.name, tl.name)
            if mode == 'totals':
                # single patch
                block.newRow(popsep, popsep / 2.)
                p = block.newElement(popsep, popsep / 2., size=patchsize)
                self._patchTable[(sl.name, None, tl.name, None, None)] = p
            elif mode == 'layer':
                # We loop over all rows and columns in the synapse patch
                # grid. For each (r,c), we find the pertaining synapse name
                # by reverse lookup in the _synAttr dictionary. This is
                # inefficient, but should not be too costly overall. But we
                # must create the patches in the order they are placed.
                # NB: We must create also those block.newElement() that are
                # not registered later, since block would otherwise not
                # skip over the unused location.
                for r in range(nsynrows):
                    block.newRow(synsep, popsep / 2.)
                    for c in range(nsyncols):
                        p = block.newElement(synsep, popsep / 2.,
                                             size=patchsize)
                        smod = [k for k, s in self._synAttr.items()
                                if s.r == r and s.c == c]
                        if smod:
                            assert (len(smod) == 1)
                            self._patchTable[(sl.name, None, tl.name,
                                              None, smod[0])] = p
            elif mode == 'population':
                # one patch per population pair
                for sp in spops:
                    block.newRow(popsep, popsep / 2.)
                    for tp in tpops:
                        pblk = block.newElement(popsep, popsep / 2.,
                                                sp, tp)
                        pblk.newRow(synsep, synsep / 2.)
                        self._patchTable[(sl.name, sp,
                                          tl.name, tp, None)] = \
                            pblk.newElement(synsep, blksep / 2.,
                                            size=patchsize)
            else:
                # detailed presentation of all pops
                for sp in spops:
                    block.newRow(popsep, popsep / 2.)
                    for tp in tpops:
                        pblk = block.newElement(popsep, popsep / 2.,
                                                sp, tp)
                        pblk.newRow(synsep, synsep / 2.)
                        # Find all connections with matching properties
                        # all information we need here is synapse model.
                        # We store this in a dictionary mapping synapse
                        # patch column to synapse model, for use below.
                        syns = dict(
                            [(self._synAttr[c.synmodel].c, c.synmodel)
                             for c in _flattened(self._cTable.values())
                             if c.matches(sl.name, sp, tl.name, tp)])
                        # create all synapse patches
                        for n in range(nsyncols):
                            # Do not duplicate existing axes.
                            if (sl.name, sp, tl.name, tp, n) in axset:
                                continue
                            # Create patch. We must create also such
                            # patches that do not have synapses, since
                            # spacing would go wrong otherwise.
                            p = pblk.newElement(synsep, 0.0,
                                                size=patchsize)
                            # if patch represents existing synapse,
                            # register
                            if n in syns:
                                self._patchTable[(sl.name, sp, tl.name,
                                                  tp, syns[n])] = p
            block.addMargin(popsep / 2., popsep / 2.)
    self._axes.addMargin(rmargin, bmargin)
    # legend (colorbar) patches; only created when a legend is shown
    if showLegend:
        self._axes.addMargin(0, cbmargin)  # add color bar at bottom
        figwidth = self._axes.lr[0] - self._axes.tl[
            0] - rmargin  # keep right marg out of calc
        if mode == 'totals' or mode == 'population':
            # single patch at right edge, 20% of figure
            if plotParams.cbwidth:
                lwidth = plotParams.cbwidth * figwidth
            else:
                lwidth = 0.2 * figwidth
            if lwidth > 100.0:  # colorbar shouldn't be wider than 10cm
                lwidth = 100.0
            lheight = (plotParams.cbheight * cbmargin
                       if plotParams.cbheight else 0.3 * cbmargin)
            if plotParams.legend_location is None:
                cblift = 0.9 * cbmargin
            else:
                cblift = 0.7 * cbmargin
            self._cbPatches = self._Patch(self._axes.tl[0],
                                          self._axes.lr[1] - cblift,
                                          None, None,
                                          lwidth,
                                          lheight)
        else:
            # one patch per synapse type, 20% of figure or less
            # we need to get the synapse names in ascending order
            # of synapse indices
            snames = [s[0] for s in
                      sorted([(k, v) for k, v in self._synAttr.items()],
                             key=lambda kv: kv[1].index)
                      ]
            snum = len(snames)
            if plotParams.cbwidth:
                lwidth = plotParams.cbwidth * figwidth
                if plotParams.cbspace:
                    lstep = plotParams.cbspace * figwidth
                else:
                    lstep = 0.5 * lwidth
            else:
                if snum < 5:
                    lwidth = 0.15 * figwidth
                    lstep = 0.1 * figwidth
                else:
                    lwidth = figwidth / (snum + 1.0)
                    lstep = (figwidth - snum * lwidth) / (snum - 1.0)
            if lwidth > 100.0:  # colorbar shouldn't be wider than 10cm
                lwidth = 100.0
                lstep = 30.0
            lheight = (plotParams.cbheight * cbmargin
                       if plotParams.cbheight else 0.3 * cbmargin)
            if plotParams.cboffset is not None:
                offset = plotParams.cboffset
            else:
                offset = lstep
            if plotParams.legend_location is None:
                cblift = 0.9 * cbmargin
            else:
                cblift = 0.7 * cbmargin
            self._cbPatches = {}
            for j in range(snum):
                self._cbPatches[snames[j]] = \
                    self._Patch(
                        self._axes.tl[0] + offset + j * (lstep + lwidth),
                        self._axes.lr[1] - cblift,
                        None, None,
                        lwidth,
                        lheight)
# ------------------------------------------------------------------------
def _scaledBox(self, p):
"""Scaled axes rectangle for patch, reverses y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, 1 - (p.t + p.h) / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _scaledBoxNR(self, p):
"""Scaled axes rectangle for patch, does not reverse y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, p.t / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _configSynapses(self, cList, synTypes):
"""Configure synapse information based on connections and user info."""
# compile information on synapse types and weights
synnames = set(c[2]['synapse_model'] for c in cList)
synweights = set(_weighteval(c[2]['weights']) for c in cList)
# set up synTypes for all pre-defined cases
if synTypes:
# check if there is info for all synapse types
stnames = _flattened([[s.name for s in r] for r in synTypes])
if len(stnames) != len(set(stnames)):
raise ValueError(
'Names of synapse types in synTypes must be unique!')
if len(synnames) > 1 and not synnames.issubset(set(stnames)):
raise ValueError('synTypes must provide information about' +
'all synapse types.')
elif len(synnames) == 1:
# only one synapse type used
if min(synweights) >= 0:
# all weights positive
synTypes = ((SynType('exc', 1.0, 'red'),),)
elif max(synweights) <= 0:
# all weights negative
synTypes = ((SynType('inh', -1.0, 'blue'),),)
else:
# positive and negative weights, assume Dale holds
synTypes = ((SynType('exc', 1.0, 'red'),),
(SynType('inh', -1.0, 'blue'),))
elif synnames == set(['AMPA', 'GABA_A']):
# only AMPA and GABA_A
synTypes = ((SynType('AMPA', 1.0, 'red'),),
(SynType('GABA_A', -1.0, 'blue'),))
elif synnames.issubset(set(['AMPA', 'NMDA', 'GABA_A', 'GABA_B'])):
synTypes = ((SynType('AMPA', 1.0, 'red'),
SynType('NMDA', 1.0, 'orange'),),
(SynType('GABA_A', -1.0, 'blue'),
SynType('GABA_B', -1.0, 'purple'),))
else:
raise ValueError('Connection list contains unknown synapse ' +
'models; synTypes required.')
# now build _synAttr by assigning blocks to rows
self._synAttr = {}
row = 0
ctr = 0
for sgroup in synTypes:
col = 0
for stype in sgroup:
self._synAttr[stype.name] = self._SynProps(row, col,
stype.relweight,
stype.cmap, ctr)
col += 1
ctr += 1
row += 1
# ------------------------------------------------------------------------
def __init__(self, lList, cList, synTypes=None, intensity='wp',
mList=None, Vmem=None, poporder=None):
"""
lList : layer list
cList : connection list
synTypes : nested list of synapse types
intensity: 'wp' - weight * probability
'p' - probability
'tcd' - |total charge deposited| * probability
requires mList; currently only for ht_model
proper results only if Vmem within reversal
potentials
mList : model list; only needed with 'tcd'
Vmem : reference membrane potential for 'tcd'
poporder : dictionary mapping population names to numbers; populations
will be sorted in diagram in order of increasing numbers.
"""
# extract layers to dict mapping name to extent
self._layers = [self._LayerProps(l[0], l[1]['extent']) for l in lList]
# ensure layer names are unique
lnames = [l.name for l in self._layers]
if len(lnames) != len(set(lnames)):
raise ValueError('Layer names must be unique.')
# set up synapse attributes
self._configSynapses(cList, synTypes)
# if tcd mode, build tcd representation
if intensity != 'tcd':
tcd = None
else:
assert (mList)
from . import tcd_nest
tcd = tcd_nest.TCD(mList)
# Build internal representation of connections.
# This representation contains one entry for each sender pop,
# target pop, synapse type tuple. Creating the connection object
# implies computation of the kernel.
# Several connection may agree in all properties, these need to be
# added here. Therefore, we need to build iteratively and store
# everything in a dictionary, so we can find early instances.
self._cTable = {}
for conn in cList:
key, val = self._Connection(conn, self._layers, self._synAttr,
intensity, tcd, Vmem).keyval
if key:
if key in self._cTable:
self._cTable[key].append(val)
else:
self._cTable[key] = [val]
# number of layers
self._nlyr = len(self._layers)
# compile list of populations, list(set()) makes list unique
self._pops = list(
set(_flattened([[(c.slayer, c.snrn), (c.tlayer, c.tnrn)]
for c in _flattened(self._cTable.values())])))
self._npop = len(self._pops)
# store population ordering; if not given, use alphabetical ordering
# also add any missing populations alphabetically at end
# layers are ignored
# create alphabetically sorted list of unique population names
popnames = sorted(list(set([p[1] for p in self._pops])),
key=lambda x: x if x is not None else "")
if poporder:
self._poporder = poporder
next = max(self._poporder.values()) + 1 # next free sorting index
else:
self._poporder = {}
next = 0
for pname in popnames:
if pname not in self._poporder:
self._poporder[pname] = next
next += 1
# compile list of synapse types
self._synTypes = list(
set([c.synmodel for c in _flattened(self._cTable.values())]))
# ------------------------------------------------------------------------
def plot(self, aggrGroups=False, aggrSyns=False, globalColors=False,
colorLimits=None, showLegend=True,
selectSyns=None, file=None, fixedWidth=None):
"""
Plot connection pattern.
By default, connections between any pair of populations
are plotted on the screen, with separate color scales for
all patches.
Arguments:
aggrGroups If True, aggregate projections with the same synapse type
and the same source and target groups (default: False)
aggrSyns If True, aggregate projections with the same synapse model
(default: False)
globalColors If True, use global color scale, otherwise local
(default: False)
colorLimits If given, must be two element vector for lower and
upper limits of color scale. Implies globalColors
(default: None)
showLegend If True, show legend below CPT (default: True).
selectSyns If tuple of synapse models, show only connections of the
give types. Cannot be combined with aggregation.
file If given, save plot to given file name; file may also be a
tuple of file names, the figure will then be saved to all
files. This may be useful if you want to save the same figure
in several formats.
fixedWidth Figure will be scaled to this width in mm by changing
patch size.
Returns:
kern_min, kern_max Minimal and maximal values of kernels,
with kern_min <= 0, kern_max >= 0.
Output:
figure created
"""
# translate new to old paramter names (per v 0.5)
normalize = globalColors
if colorLimits:
normalize = True
if selectSyns:
if aggrPops or aggrSyns:
raise ValueError(
'selectSyns cannot be combined with aggregation.')
selected = selectSyns
mode = 'select'
elif aggrGroups and aggrSyns:
mode = 'totals'
elif aggrGroups and not aggrSyns:
mode = 'layer'
elif aggrSyns and not aggrGroups:
mode = 'population'
else:
mode = None
if mode == 'layer':
# reduce to dimensions sender layer, target layer, synapse type
# add all kernels agreeing on these three attributes
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
for synmodel in self._synTypes:
kerns = [c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name,
syn=synmodel)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, synmodel,
_addKernels(kerns)))
elif mode == 'population':
# reduce to dimensions sender layer, target layer
# all all kernels, weighting according to synapse type
plotKerns = []
for spop in self._pops:
for tpop in self._pops:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=spop[0], sn=spop[1], tl=tpop[0],
tn=tpop[1])]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(spop[0], spop[1], tpop[0], tpop[1],
None,
_addKernels(kerns)))
elif mode == 'totals':
# reduce to dimensions sender layer, target layer
# all all kernels, weighting according to synapse type
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, None, _addKernels(kerns)))
elif mode == 'select':
# copy only those kernels that have the requested synapse type,
# no dimension reduction
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values() if
clist[0].synmodel in selected]
else:
# copy all
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values()]
self._prepareAxes(mode, showLegend)
if fixedWidth:
margs = plotParams.margins.left + plotParams.margins.right
if fixedWidth <= margs:
raise ValueError('Requested width must be less than ' +
'width of margins (%g mm)' % margs)
currWidth = self._axes.lr[0]
currPatchMax = plotParams.patch_size # store
# compute required patch size
plotParams.patch_size = ((fixedWidth - margs) /
(currWidth - margs) * currPatchMax)
# build new axes
del self._axes
self._prepareAxes(mode, showLegend)
# restore patch size
plotParams.patch_size = currPatchMax
# create figure with desired size
fsize = np.array(self._axes.lr) / 25.4 # convert mm to inches
f = plt.figure(figsize=fsize, facecolor='w')
# size will be rounded according to DPI setting, adjust fsize
dpi = f.get_dpi()
fsize = np.floor(fsize * dpi) / dpi
# check that we got the correct size
actsize = np.array([f.get_figwidth(), f.get_figheight()], dtype=float)
if all(actsize == fsize):
self._figscale = 1.0 # no scaling
else:
warnings.warn("""
WARNING: Figure shrunk on screen!
The figure is shrunk to fit onto the screen.
Please specify a different backend using the -d
option to obtain full-size figures. Your current
backend is: %s
""" % mpl.get_backend())
plt.close(f)
# determine scale: most shrunk dimension
self._figscale = np.min(actsize / fsize)
# create shrunk on-screen figure
f = plt.figure(figsize=self._figscale * fsize, facecolor='w')
# just ensure all is well now
actsize = np.array([f.get_figwidth(), f.get_figheight()],
dtype=float)
# add decoration
for block in _flattened(self._axes.elements):
ax = f.add_axes(self._scaledBox(block),
axisbg=plotParams.layer_bg[block.location],
xticks=[], yticks=[],
zorder=plotParams.z_layer)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if block.l <= self._axes.l_patches and block.slbl:
ax.set_ylabel(block.slbl,
rotation=plotParams.layer_orientation['sender'],
fontproperties=plotParams.layer_font)
if block.t <= self._axes.t_patches and block.tlbl:
ax.set_xlabel(block.tlbl,
rotation=plotParams.layer_orientation['target'],
fontproperties=plotParams.layer_font)
ax.xaxis.set_label_position('top')
# inner blocks for population labels
if mode not in ('totals', 'layer'):
for pb in _flattened(block.elements):
if not isinstance(pb, self._Block):
continue # should not happen
ax = f.add_axes(self._scaledBox(pb),
axisbg='none', xticks=[], yticks=[],
zorder=plotParams.z_pop)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if pb.l + pb.w >= self._axes.r_patches and pb.slbl:
ax.set_ylabel(pb.slbl,
rotation=plotParams.pop_orientation[
'sender'],
fontproperties=plotParams.pop_font)
ax.yaxis.set_label_position('right')
if pb.t + pb.h >= self._axes.b_patches and pb.tlbl:
ax.set_xlabel(pb.tlbl,
rotation=plotParams.pop_orientation[
'target'],
fontproperties=plotParams.pop_font)
# determine minimum and maximum values across all kernels,
# but set min <= 0, max >= 0
kern_max = max(0.0, max([np.max(kern.kern) for kern in plotKerns]))
kern_min = min(0.0, min([np.min(kern.kern) for kern in plotKerns]))
# determine color limits for plots
if colorLimits:
c_min, c_max = colorLimits # explicit values
else:
# default values for color limits
# always 0 as lower limit so anything > 0 is non-white,
# except when totals or populations
c_min = None if mode in ('totals', 'population') else 0.0
c_max = None # use patch maximum as upper limit
if normalize:
# use overall maximum, at least 0
c_max = kern_max
if aggrSyns:
# use overall minimum, if negative, otherwise 0
c_min = kern_min
# for c_max, use the larger of the two absolute values
c_max = kern_max
# if c_min is non-zero, use same color scale for neg values
if c_min < 0:
c_min = -c_max
# Initialize dict storing sample patches for each synapse type for use
# in creating color bars. We will store the last patch of any given
# synapse type for reference. When aggrSyns, we have only one patch
# type and store that.
if not aggrSyns:
samplePatches = dict(
[(sname, None) for sname in self._synAttr.keys()])
else:
# only single type of patches
samplePatches = None
for kern in plotKerns:
p = self._patchTable[(kern.sl, kern.sn, kern.tl,
kern.tn, kern.syn)]
p.ax = f.add_axes(self._scaledBox(p), aspect='equal',
xticks=[], yticks=[], zorder=plotParams.z_conn)
p.ax.patch.set_edgecolor('none')
if hasattr(p.ax, 'frame'):
p.ax.frame.set_visible(False)
else:
for sp in p.ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if not aggrSyns:
# we have synapse information -> not totals, a vals positive
assert (kern.syn)
assert (np.min(kern.kern) >= 0.0)
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches[kern.syn] = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=self._synAttr[
kern.syn].cmap) # ,
# interpolation='nearest')
else:
# we have totals, special color table and normalization
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=cm.bluered,
norm=cm.ZeroCenterNorm())
# interpolation='nearest')
# Create colorbars at bottom of figure
if showLegend:
# FIXME: rewrite the function to avoid comparisons with None!
f_min = float("-inf") if c_min is None else c_min
f_max = float("-inf") if c_max is None else c_max
# Do we have kernel values exceeding the color limits?
if f_min <= kern_min and kern_max <= f_max:
extmode = 'neither'
elif f_min > kern_min and kern_max <= f_max:
extmode = 'min'
elif f_min <= kern_min and kern_max > f_max:
extmode = 'max'
else:
extmode = 'both'
if aggrSyns:
cbax = f.add_axes(self._scaledBox(self._cbPatches))
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
if normalize:
# colorbar with freely settable ticks
cb = f.colorbar(samplePatches, cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
else:
# colorbar with tick labels 'Exc', 'Inh'
# we add the color bare here explicitly, so we get no
# problems if the sample patch includes only pos or
# only neg values
cb = mpl.colorbar.ColorbarBase(cbax, cmap=cm.bluered,
orientation='horizontal')
cbax.set_xticks([0, 1])
cbax.set_xticklabels(['Inh', 'Exc'])
cb.outline.set_linewidth(0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
# no title in this case
else:
# loop over synapse types
for syn in self._synAttr.keys():
cbax = f.add_axes(self._scaledBox(self._cbPatches[syn]))
if plotParams.legend_location is None:
cbax.set_ylabel(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
else:
cbax.set_title(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
if normalize:
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
# proper colorbar
cb = f.colorbar(samplePatches[syn], cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
cb.outline.set_linewidth(
0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
else:
# just a solid color bar with no ticks
cbax.set_xticks([])
cbax.set_yticks([])
# full-intensity color from color map
cbax.set_axis_bgcolor(self._synAttr[syn].cmap(1.0))
# narrower border
if hasattr(cbax, 'frame'):
cbax.frame.set_linewidth(0.5)
else:
for sp in cbax.spines.values():
sp.set_linewidth(0.5)
# save to file(s), use full size
f.set_size_inches(fsize)
if isinstance(file, (list, tuple)):
for fn in file:
f.savefig(fn)
elif isinstance(file, str):
f.savefig(file)
f.set_size_inches(actsize) # reset size for further interactive work
return kern_min, kern_max
# ------------------------------------------------------------------------
def toLaTeX(self, file, standalone=False, enumerate=False, legend=True):
"""
Write connection table to file.
Arguments:
file output file name
standalone create complete LaTeX file (default: False)
enumerate enumerate connections (default: False)
legend add explanation of functions used (default: True)
"""
lfile = open(file, 'w')
if not lfile:
raise Exception('Could not open file "%s"' % file)
if standalone:
lfile.write(
r"""
\documentclass[a4paper,american]{article}
\usepackage[pdftex,margin=1in,centering,
noheadfoot,a4paper]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{color}
\usepackage{calc}
\usepackage{tabularx} % autom. adjusts column width in tables
\usepackage{multirow} % allows entries spanning several rows
\usepackage{colortbl} % allows coloring tables
\usepackage[fleqn]{amsmath}
\setlength{\mathindent}{0em}
\usepackage{mathpazo}
\usepackage[scaled=.95]{helvet}
\renewcommand\familydefault{\sfdefault}
\renewcommand\arraystretch{1.2}
\pagestyle{empty}
% \hdr{ncols}{label}{title}
%
% Typeset header bar across table with ncols columns
% with label at left margin and centered title
%
\newcommand{\hdr}[3]{%
\multicolumn{#1}{|l|}{%
\color{white}\cellcolor[gray]{0.0}%
\textbf{\makebox[0pt]{#2}\hspace{0.5\linewidth}%
\makebox[0pt][c]{#3}}%
}%
}
\begin{document}
""")
lfile.write(
r"""
\noindent\begin{tabularx}{\linewidth}{%s|l|l|l|c|c|X|}\hline
\hdr{%d}{}{Connectivity}\\\hline
%s \textbf{Src} & \textbf{Tgt} & \textbf{Syn} &
\textbf{Wght} & \textbf{Mask} & \textbf{Kernel} \\\hline
""" % (('|r', 7, '&') if enumerate else ('', 6, '')))
# ensure sorting according to keys, gives some alphabetic sorting
haveU, haveG = False, False
cctr = 0 # connection counter
for ckey in sorted(self._cTable.keys()):
for conn in self._cTable[ckey]:
cctr += 1
if enumerate:
lfile.write('%d &' % cctr)
# take care to escape _ in names such as GABA_A
# also remove any pending '/None'
lfile.write((r'%s/%s & %s/%s & %s' %
(conn.slayer, conn.snrn, conn.tlayer, conn.tnrn,
conn.synmodel)).replace('_', r'\_').replace(
'/None', ''))
lfile.write(' & \n')
if isinstance(conn.weight, (int, float)):
lfile.write(r'%g' % conn.weight)
elif 'uniform' in conn.weight:
cw = conn.weight['uniform']
lfile.write(
r'$\mathcal{U}[%g, %g)$' % (cw['min'], cw['max']))
haveU = True
else:
raise ValueError(
'Unkown weight type "%s"' % conn.weight.__str__)
lfile.write(' & \n')
if 'circular' in conn.mask:
lfile.write(r'$\leq %g$' % conn.mask['circular']['radius'])
elif 'rectangular' in conn.mask:
cmr = conn.mask['rectangular']
lfile.write(
r"""$[(%+g, %+g), (%+g, %+g)]$"""
% (cmr['lower_left'][0], cmr['lower_left'][1],
cmr['upper_right'][0], cmr['upper_right'][1]))
else:
raise ValueError(
'Unknown mask type "%s"' % conn.mask.__str__)
lfile.write(' & \n')
if isinstance(conn.kernel, (int, float)):
lfile.write(r'$%g$' % conn.kernel)
elif 'gaussian' in conn.kernel:
ckg = conn.kernel['gaussian']
lfile.write(r'$\mathcal{G}(p_0 = %g, \sigma = %g)$' %
(ckg['p_center'], ckg['sigma']))
haveG = True
else:
raise ValueError(
'Unkown kernel type "%s"' % conn.kernel.__str__)
lfile.write('\n')
lfile.write(r'\\\hline' '\n')
if legend and (haveU or haveG):
# add bottom line with legend
lfile.write(r'\hline' '\n')
lfile.write(r'\multicolumn{%d}{|l|}{\footnotesize ' %
(7 if enumerate else 6))
if haveG:
lfile.write(r'$\mathcal{G}(p_0, \sigma)$: ' +
r'$p(\mathbf{x})=p_0 e^{-\mathbf{x}^2/2\sigma^2}$')
if haveG and haveU:
lfile.write(r', ')
if haveU:
lfile.write(
r'$\mathcal{U}[a, b)$: uniform distribution on $[a, b)$')
lfile.write(r'}\\\hline' '\n')
lfile.write(r'\end{tabularx}' '\n\n')
if standalone:
lfile.write(r'\end{document}''\n')
lfile.close()
# ----------------------------------------------------------------------------
def _evalkernel(mask, kernel, weight, extent, intensity, tcd):
    """
    Evaluate the kernel on a grid covering extent.

    Kernel values are multiplied with abs(weight). If weight is a
    distribution, the mean value is used.

    Result is a masked array, in which the values outside the mask are
    masked.

    Raises ValueError for an unknown intensity mode (previously this case
    fell through and silently returned None).
    """
    # determine resolution, number of data points
    dx = max(extent) / plotParams.n_kern
    # np.ceil returns a float; np.linspace requires an integral num,
    # so cast explicitly (modern NumPy raises TypeError otherwise)
    nx = int(np.ceil(extent[0] / dx))
    ny = int(np.ceil(extent[1] / dx))
    x = np.linspace(-0.5 * extent[0], 0.5 * extent[0], nx)
    y = np.linspace(-0.5 * extent[1], 0.5 * extent[1], ny)
    X, Y = np.meshgrid(x, y)
    if intensity == 'wp':
        return np.ma.masked_array(abs(weight) * _kerneval(X, Y, kernel),
                                  np.logical_not(_maskeval(X, Y, mask)))
    elif intensity == 'p':
        return np.ma.masked_array(_kerneval(X, Y, kernel),
                                  np.logical_not(_maskeval(X, Y, mask)))
    elif intensity == 'tcd':
        return np.ma.masked_array(
            abs(tcd) * abs(weight) * _kerneval(X, Y, kernel),
            np.logical_not(_maskeval(X, Y, mask)))
    else:
        raise ValueError('Unknown intensity mode "%s"' % intensity)
# ----------------------------------------------------------------------------
def _weighteval(weight):
    """Return the weight, or the mean of a weight distribution, signed.

    weight may be a plain number, or a one-entry dict with key 'uniform'
    (mean of 'min'/'max' is used) or 'gaussian' ('mean' is used).

    Raises Exception if the weight specification cannot be interpreted.
    """
    w = None
    if isinstance(weight, (float, int)):
        w = weight
    elif isinstance(weight, dict):
        assert (len(weight) == 1)
        if 'uniform' in weight:
            w = 0.5 * (weight['uniform']['min'] + weight['uniform']['max'])
        elif 'gaussian' in weight:
            w = weight['gaussian']['mean']
        else:
            raise Exception(
                'Unknown weight type "%s"' % tuple(weight.keys())[0])
    # bug fix: test for None rather than falsiness -- a weight of
    # exactly 0 is valid and was previously rejected here
    if w is None:
        raise Exception('Cannot handle weight.')
    return float(w)
# ----------------------------------------------------------------------------
def _maskeval(x, y, mask):
    """
    Evaluate mask given as topology style dict at (x, y).

    x, y are 2d numpy arrays; returns a boolean array of the same shape
    that is True inside the mask.
    """
    assert (len(mask) == 1)
    if 'circular' in mask:
        r = mask['circular']['radius']
        m = x ** 2 + y ** 2 <= r ** 2
    elif 'doughnut' in mask:
        ri = mask['doughnut']['inner_radius']
        ro = mask['doughnut']['outer_radius']
        d = x ** 2 + y ** 2
        # bug fix: d is the SQUARED distance, so it must be compared
        # against the squared radii (the circular branch already does
        # this); previously the plain radii were used
        m = np.logical_and(ri ** 2 <= d, d <= ro ** 2)
    elif 'rectangular' in mask:
        ll = mask['rectangular']['lower_left']
        ur = mask['rectangular']['upper_right']
        m = np.logical_and(np.logical_and(ll[0] <= x, x <= ur[0]),
                           np.logical_and(ll[1] <= y, y <= ur[1]))
    else:
        raise Exception('Unknown mask type "%s"' % tuple(mask.keys())[0])
    return m
# ----------------------------------------------------------------------------
def _kerneval(x, y, fun):
    """
    Evaluate function given as topology style dict at (x, y).

    x, y are 2d numpy arrays. fun may be a plain number (constant kernel)
    or a one-entry dict with key 'gaussian'.
    """
    if isinstance(fun, (float, int)):
        return float(fun) * np.ones(np.shape(x))
    elif isinstance(fun, dict):
        assert (len(fun) == 1)
        if 'gaussian' in fun:
            g = fun['gaussian']
            p0 = g['p_center']
            sig = g['sigma']
            return p0 * np.exp(-0.5 * (x ** 2 + y ** 2) / sig ** 2)
        else:
            # bug fix: a comma was used instead of '%', so the kernel
            # name was passed as a second Exception arg, never formatted
            raise Exception('Unknown kernel "%s"' % tuple(fun.keys())[0])
    # something very wrong
    raise Exception('Cannot handle kernel.')
# ----------------------------------------------------------------------------
def _addKernels(kList):
    """
    Sum a list of masked kernels.

    Arguments:
    kList: List of masked arrays of equal size.

    Returns:
    Masked array of same size as input. All values are added, with masked
    values treated as 0. The mask of the sum is the logical AND of all
    individual masks, so only values masked in every kernel stay masked.
    _addKernels always returns a new array object, even if kList has only
    a single element.
    """
    assert (len(kList) > 0)
    if len(kList) == 1:
        return kList[0].copy()
    total = np.ma.filled(kList[0], fill_value=0).copy()
    combined_mask = kList[0].mask.copy()
    for kern in kList[1:]:
        total = total + np.ma.filled(kern, fill_value=0)
        combined_mask = np.logical_and(combined_mask, kern.mask)
    return np.ma.masked_array(total, combined_mask)
# ----------------------------------------------------------------------------
def _flattened(lst):
    """Return ``lst`` (a list of lists) flattened by one level."""
    return [item for sublist in lst for item in sublist]
# ----------------------------------------------------------------------------
"""
if __name__ == "__main__":
import sys
sys.path += ['./examples']
# import simple
# reload(simple)
cp = ConnectionPattern(simple.layerList, simple.connectList)
import simple2
reload(simple2)
cp2 = ConnectionPattern(simple2.layerList, simple2.connectList)
st3 = ((SynType('GABA_B', -5.0, 'orange'),
SynType('GABA_A', -1.0, 'm')),
(SynType('NMDA', 5.0, 'b'),
SynType('FOO', 1.0, 'aqua'),
SynType('AMPA', 3.0, 'g')))
cp3s = ConnectionPattern(simple2.layerList, simple2.connectList,
synTypes=st3)
import simple3
reload(simple3)
cp3 = ConnectionPattern(simple3.layerList, simple3.connectList)
# cp._prepareAxes('by layer')
# cp2._prepareAxes('by layer')
# cp3._prepareAxes('detailed')
cp2.plot()
cp2.plot(mode='layer')
cp2.plot(mode='population')
cp2.plot(mode='totals')
cp2.plot(mode=('AMPA',))
cp2.plot(mode=('AMPA','GABA_B'))
# cp3.plot()
# cp3.plot(mode='population')
# cp3.plot(mode='layer')
# cp3.plot(mode='totals')
# cp.plot(normalize=True)
# cp.plot(totals=True, normalize=True)
# cp2.plot()
# cp2.plot(file=('cp3.eps'))
# cp2.plot(byLayer=True)
# cp2.plot(totals=True)
"""
| gpl-2.0 |
marscher/mdtraj | MDTraj/core/topology.py | 1 | 30937 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Kyle A. Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import re
import numpy as np
import itertools
from mdtraj.core import element as elem
import xml.etree.ElementTree as etree
from mdtraj.utils import ilen, import_
##############################################################################
# Utilities
##############################################################################
def _topology_from_subset(topology, atom_indices):
    """Create a new topology that only contains the supplied indices

    Note
    ----
    This really should be a copy constructor (class method) on Topology,
    but I want it to work on either the mdtraj topology OR the OpenMM
    topology. An inplace version for the topology object we have here
    is also available.

    Parameters
    ----------
    topology : topology
        The base topology
    atom_indices : list([int])
        The indices of the atoms to keep
    """
    newTopology = Topology()
    old_atom_to_new_atom = {}

    # membership is tested once per atom below; a set makes that O(1)
    atom_indices = set(atom_indices)

    for chain in topology._chains:
        newChain = newTopology.add_chain()
        for residue in chain._residues:
            # bug fix: 'getattr(...) or residue.index' discarded a valid
            # resSeq of 0 because 0 is falsy; test for None explicitly
            resSeq = getattr(residue, 'resSeq', None)
            if resSeq is None:
                resSeq = residue.index
            newResidue = newTopology.add_residue(residue.name, newChain,
                                                 resSeq)
            for atom in residue._atoms:
                if atom.index in atom_indices:
                    newAtom = newTopology.add_atom(atom.name, atom.element,
                                                   newResidue)
                    old_atom_to_new_atom[atom] = newAtom

    # OpenMM exposes bonds as a method, mdtraj as a property
    bondsiter = topology.bonds
    if not hasattr(bondsiter, '__iter__'):
        bondsiter = bondsiter()

    for atom1, atom2 in bondsiter:
        try:
            newTopology.add_bond(old_atom_to_new_atom[atom1],
                                 old_atom_to_new_atom[atom2])
        except KeyError:
            # we only put bonds into the new topology if both of their
            # partners were indexed and thus HAVE a new atom
            pass

    # Delete empty residues
    for chain in newTopology._chains:
        chain._residues = [r for r in chain._residues if len(r._atoms) > 0]
    # Delete empty chains
    newTopology._chains = [c for c in newTopology._chains
                           if len(c._residues) > 0]
    # Re-set the numAtoms and numResidues
    newTopology._numAtoms = ilen(newTopology.atoms)
    newTopology._numResidues = ilen(newTopology.residues)
    return newTopology
##############################################################################
# Classes
##############################################################################
class Topology(object):
"""Topology stores the topological information about a system.
The structure of a Topology object is similar to that of a PDB file.
It consists of a set of Chains (often but not always corresponding to
polymer chains). Each Chain contains a set of Residues, and each Residue
contains a set of Atoms. In addition, the Topology stores a list of which
atom pairs are bonded to each other.
Atom and residue names should follow the PDB 3.0 nomenclature for all
molecules for which one exists.
Attributes
----------
chains : generator
Iterator over all Chains in the Topology.
residues : genetator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
Examples
--------
>>> topology = md.load('example.pdb').topology
>>> print(topology)
<mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90>
>>> table, bonds = topology.to_dataframe()
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYS 0
1 1 CH3 C 0 CYS 0
2 2 H2 H 0 CYS 0
3 3 H3 H 0 CYS 0
4 4 C C 0 CYS 0
>>> # rename residue "CYS" to "CYSS"
>>> table[table['residue'] == 'CYS']['residue'] = 'CYSS'
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYSS 0
1 1 CH3 C 0 CYSS 0
2 2 H2 H 0 CYSS 0
3 3 H3 H 0 CYSS 0
4 4 C C 0 CYSS 0
>>> t2 = md.Topology.from_dataframe(table, bonds)
"""
_standardBonds = {}
def __init__(self):
"""Create a new Topology object"""
self._chains = []
self._numResidues = 0
self._numAtoms = 0
self._bonds = []
self._atoms = []
self._residues = []
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def _string_summary_basic(self):
return "mdtraj.Topology with %d chains, %d residues, %d atoms, %d bonds" % (self.n_chains, self.n_residues, self.n_atoms, len(self._bonds))
def copy(self):
"""Return a copy of the topology
Returns
-------
out : Topology
A copy of this topology
"""
out = Topology()
for chain in self.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
out.add_atom(atom.name, atom.element, r)
for a1, a2 in self.bonds:
out.add_bond(a1, a2)
return out
def __copy__(self, *args):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
def join(self, other):
"""Join two topologies together
Parameters
----------
other : Topology
Another topology object
Returns
-------
out : Topology
A joint topology, with all of the atoms/residues/chains/bonds
in each of the individual topologies
"""
if not isinstance(other, Topology):
raise ValueError('other must be an instance of Topology to join')
out = self.copy()
atom_mapping = {}
for chain in other.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
a = out.add_atom(atom.name, atom.element, r)
atom_mapping[atom] = a
for a1, a2 in other.bonds:
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_openmm(self):
"""Convert this topology into OpenMM topology
Returns
-------
topology : simtk.openmm.app.Topology
This topology, as an OpenMM topology
"""
app = import_('simtk.openmm.app')
out = app.Topology()
atom_mapping = {}
for chain in self.chains:
c = out.addChain()
for residue in chain.residues:
r = out.addResidue(residue.name, c)
for atom in residue.atoms:
a = out.addAtom(atom.name, app.Element.getBySymbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in self.bonds:
out.addBond(atom_mapping[a1], atom_mapping[a2])
return out
@classmethod
def from_openmm(cls, value):
"""Create a mdtraj topology from an OpenMM topology
Parameters
----------
value : simtk.openmm.app.Topology
An OpenMM topology that you wish to convert to a
mdtraj topology.
"""
app = import_('simtk.openmm.app')
if not isinstance(value, app.Topology):
raise TypeError('value must be an OpenMM Topology. '
'You supplied a %s' % type(value))
out = cls()
atom_mapping = {}
for chain in value.chains():
c = out.add_chain()
for residue in chain.residues():
r = out.add_residue(residue.name, c)
for atom in residue.atoms():
a = out.add_atom(atom.name, elem.get_by_symbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in value.bonds():
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_dataframe(self):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
pd = import_('pandas')
data = []
for atom in self.atoms:
if atom.element is None:
element_symbol = ""
else:
element_symbol = atom.element.symbol
data.append((atom.index, atom.name, element_symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index))
atoms = pd.DataFrame(data, columns=["serial", "name", "element",
"resSeq", "resName", "chainID"])
atoms = atoms.set_index("serial")
bonds = np.array([(a.index, b.index) for (a, b) in self.bonds])
return atoms, bonds
@classmethod
def from_dataframe(cls, atoms, bonds=None):
"""Create a mdtraj topology from a pandas data frame
Parameters
----------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
following the same conventions as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. Specifiying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
pd = import_('pandas')
for col in ["name", "element", "resSeq" , "resName", "chainID"]:
if col not in atoms.columns:
raise ValueError('dataframe must have column %s' % col)
out = cls()
if not isinstance(atoms, pd.DataFrame):
raise TypeError('atoms must be an instance of pandas.DataFrame. '
'You supplied a %s' % type(atoms))
if not isinstance(bonds, np.ndarray):
raise TypeError('bonds must be an instance of numpy.ndarray. '
'You supplied a %s' % type(bonds))
if not np.all(np.arange(len(atoms)) == atoms.index):
raise ValueError('atoms must be uniquely numbered starting from zero.')
out._atoms = [None for i in range(len(atoms))]
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d do not share the same residue name' % ri)
r = out.add_residue(residue_name, c, ri)
for ai, atom in residue_atoms.iterrows():
if atom['element'] == "":
element = None
else:
element = elem.get_by_symbol(atom['element'])
a = Atom(atom['name'], element, ai, r)
out._atoms[ai] = a
r._atoms.append(a)
if bonds is not None:
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def __eq__(self, other):
"""Are two topologies equal?
Parameters
----------
other : object
The object to compare to
Returns
-------
equality : bool
Are the two topologies identical?
"""
if not isinstance(other, Topology):
return False
if self is other:
return True
if len(self._chains) != len(other._chains):
return False
for c1, c2 in zip(self.chains, other.chains):
if c1.index != c2.index:
return False
if len(c1._residues) != len(c2._residues):
return False
for r1, r2 in zip(c1.residues, c2.residues):
if (r1.index != r1.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq):
return False
if len(r1._atoms) != len(r2._atoms):
return False
for a1, a2 in zip(r1.atoms, r2.atoms):
if (a1.index != a2.index) or (a1.name != a2.name):
return False
if a1.element is not None and a2.element is not None:
if a1.element != a2.element:
return False
#for attr in ['atomic_number', 'name', 'symbol']:
# if getattr(a1.element, attr) != getattr(a2.element, attr):
# return False
if len(self._bonds) != len(other._bonds):
return False
# the bond ordering is somewhat ambiguous, so try and fix it for comparison
self_sorted_bonds = sorted([(a1.index, b1.index) for (a1, b1) in self.bonds])
other_sorted_bonds = sorted([(a2.index, b2.index) for (a2, b2) in other.bonds])
for i in range(len(self._bonds)):
(a1, b1) = self_sorted_bonds[i]
(a2, b2) = other_sorted_bonds[i]
if (a1 != a2) or (b1 != b2):
return False
return True
def add_chain(self):
"""Create a new Chain and add it to the Topology.
Returns
-------
chain : mdtraj.topology.Chain
the newly created Chain
"""
chain = Chain(len(self._chains), self)
self._chains.append(chain)
return chain
def add_residue(self, name, chain, resSeq=None):
"""Create a new Residue and add it to the Topology.
Parameters
----------
name : str
The name of the residue to add
chain : mdtraj.topology.Chain
The Chain to add it to
resSeq : int, optional
Residue sequence number, such as from a PDB record. These sequence
numbers are arbitrary, and do not necessarily start at 0 (or 1).
If not supplied, the resSeq attribute will be set to the
residue's sequential (0 based) index.
Returns
-------
residue : mdtraj.topology.Residue
The newly created Residue
"""
if resSeq is None:
resSeq = self._numResidues
residue = Residue(name, self._numResidues, chain, resSeq)
self._residues.append(residue)
self._numResidues += 1
chain._residues.append(residue)
return residue
def add_atom(self, name, element, residue):
"""Create a new Atom and add it to the Topology.
Parameters
----------
name : str
The name of the atom to add
element : mdtraj.element.Element
The element of the atom to add
residue : mdtraj.topology.Residue
The Residue to add it to
Returns
-------
atom : mdtraj.topology.Atom
the newly created Atom
"""
atom = Atom(name, element, self._numAtoms, residue)
self._atoms.append(atom)
self._numAtoms += 1
residue._atoms.append(atom)
return atom
def add_bond(self, atom1, atom2):
"""Create a new bond and add it to the Topology.
Parameters
----------
atom1 : mdtraj.topology.Atom
The first Atom connected by the bond
atom2 : mdtraj.topology.Atom
The second Atom connected by the bond
"""
if atom1.index < atom2.index:
self._bonds.append((atom1, atom2))
else:
self._bonds.append((atom2, atom1))
def chain(self, index):
"""Get a specific chain by index. These indices
start from zero.
Returns
-------
chain : Chain
The `index`-th chain in the topology.
"""
return self._chains[index]
@property
def chains(self):
"""Iterator over all Chains in the Topology.
Returns
-------
chainiter : listiterator
Iterator over all Chains in the Topology.
"""
return iter(self._chains)
@property
def n_chains(self):
"""Get the number of chains in the Topology"""
return len(self._chains)
def residue(self, index):
"""Get a specific residue by index. These indices
start from zero.
Returns
-------
residue : Residue
The `index`-th residue in the topology.
"""
return self._residues[index]
@property
def residues(self):
"""Iterator over all Residues in the Topology.
Returns
-------
residueiter : generator
Iterator over all Residues in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
yield residue
@property
def n_residues(self):
"""Get the number of residues in the Topology"""
return len(self._residues)
def atom(self, index):
"""Get a specific atom by index. These indices
start from zero.
Returns
-------
atom : Atom
The `index`-th atom in the topology.
"""
return self._atoms[index]
@property
def atoms(self):
"""Iterator over all Atoms in the Topology.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
for atom in residue._atoms:
yield atom
@property
def n_atoms(self):
"""Get the number of atoms in the Topology"""
return len(self._atoms)
@property
def bonds(self):
"""Iterator over all bonds (each represented as a tuple of two Atoms) in the Topology.
Returns
-------
atomiter : generator
Iterator over all tuple of Atoms in the Trajectory involved in a bond.
"""
return iter(self._bonds)
def create_standard_bonds(self):
"""Create bonds based on the atom and residue names for all standard residue types.
"""
if len(Topology._standardBonds) == 0:
# Load the standard bond defitions.
tree = etree.parse(os.path.join(os.path.dirname(__file__), '..', 'formats', 'pdb', 'data', 'residues.xml'))
for residue in tree.getroot().findall('Residue'):
bonds = []
Topology._standardBonds[residue.attrib['name']] = bonds
for bond in residue.findall('Bond'):
bonds.append((bond.attrib['from'], bond.attrib['to']))
for chain in self._chains:
# First build a map of atom names to atoms.
atomMaps = []
for residue in chain._residues:
atomMap = {}
atomMaps.append(atomMap)
for atom in residue._atoms:
atomMap[atom.name] = atom
# Loop over residues and construct bonds.
for i in range(len(chain._residues)):
name = chain._residues[i].name
if name in Topology._standardBonds:
for bond in Topology._standardBonds[name]:
if bond[0].startswith('-') and i > 0:
fromResidue = i-1
fromAtom = bond[0][1:]
elif bond[0].startswith('+') and i <len(chain._residues):
fromResidue = i+1
fromAtom = bond[0][1:]
else:
fromResidue = i
fromAtom = bond[0]
if bond[1].startswith('-') and i > 0:
toResidue = i-1
toAtom = bond[1][1:]
elif bond[1].startswith('+') and i <len(chain._residues):
toResidue = i+1
toAtom = bond[1][1:]
else:
toResidue = i
toAtom = bond[1]
if fromAtom in atomMaps[fromResidue] and toAtom in atomMaps[toResidue]:
self.add_bond(atomMaps[fromResidue][fromAtom], atomMaps[toResidue][toAtom])
def create_disulfide_bonds(self, positions):
"""Identify disulfide bonds based on proximity and add them to the Topology.
Parameters
----------
positions : list
The list of atomic positions based on which to identify bonded atoms
"""
def isCyx(res):
names = [atom.name for atom in res._atoms]
return 'SG' in names and 'HG' not in names
cyx = [res for res in self.residues if res.name == 'CYS' and isCyx(res)]
atomNames = [[atom.name for atom in res._atoms] for res in cyx]
for i in range(len(cyx)):
sg1 = cyx[i]._atoms[atomNames[i].index('SG')]
pos1 = positions[sg1.index]
for j in range(i):
sg2 = cyx[j]._atoms[atomNames[j].index('SG')]
pos2 = positions[sg2.index]
delta = [x-y for (x,y) in zip(pos1, pos2)]
distance = np.sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
if distance < 0.3: # this is supposed to be nm. I think we're good
self.add_bond(sg1, sg2)
def subset(self, atom_indices):
"""Create a new Topology from a subset of the atoms in an existing topology.
Notes
-----
The existing topology will not be altered.
Parameters
----------
atom_indices array_like
A list of the indices corresponding to the atoms in that you'd
like to retain.
"""
return _topology_from_subset(self, atom_indices)
class Chain(object):
"""A Chain object represents a chain within a Topology.
Attributes
----------
index : int
The index of the Chain within its Topology
topology : mdtraj.Topology
The Topology this Chain belongs to
residues : genetator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
"""
def __init__(self, index, topology):
"""Construct a new Chain. You should call add_chain() on the Topology instead of calling this directly."""
## The index of the Chain within its Topology
self.index = index
## The Topology this Chain belongs to
self.topology = topology
self._residues = []
@property
def residues(self):
"""Iterator over all Residues in the Chain.
Returns
-------
residueiter : listiterator
Iterator over all Residues in the Topology.
"""
return iter(self._residues)
def residue(self, index):
"""Get a specific residue in this Chain
Returns
-------
residue : Residue
"""
return self._residue[index]
@property
def n_residues(self):
"Get the number of residues in this Chain"
return len(self._residues)
@property
def atoms(self):
"""Iterator over all Atoms in the Chain.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Chain.
"""
for residue in self._residues:
for atom in residue._atoms:
yield atom
def atom(self, index):
"""Get a specific atom in this Chain
Returns
-------
atom : Atom
"""
# this could be made faster by caching the list
# of atoms internally if necessary
return next(itertools.islice(self.atoms, index, index+1))
@property
def n_atoms(self):
"""Get the number of atoms in this Chain"""
return sum(r.n_atoms for r in self._residues)
class Residue(object):
"""A Residue object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Residue
index : int
The index of the Residue within its Topology
chain : int
The residue sequence number
"""
def __init__(self, name, index, chain, resSeq):
"""Construct a new Residue. You should call add_residue()
on the Topology instead of calling this directly."""
self.name = name
self.index = index
self.chain = chain
self.resSeq = resSeq
self._atoms = []
@property
def atoms(self):
"""Iterator over all Atoms in the Residue.
Returns
-------
atomiter : listiterator
Iterator over all Atoms in the Residue.
"""
return iter(self._atoms)
def atom(self, index):
"""Get a specific atom in this Residue.
Returns
-------
atom : Atom
"""
return self._atoms[index]
@property
def n_atoms(self):
"""Get the number of atoms in this Residue"""
return len(self._atoms)
def __str__(self):
return '%s%s' % (self.name, self.resSeq)
class Atom(object):
"""An Atom object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Atom
element : mdtraj.element.Element
The element of the Atoms
index : int
The index of the Atom within its Topology
residue : mdtraj.topology.Residue
The Residue this Atom belongs to
"""
def __init__(self, name, element, index, residue):
"""Construct a new Atom. You should call add_atom() on the Topology instead of calling this directly."""
## The name of the Atom
self.name = name
## That Atom's element
self.element = element
## The index of the Atom within its Topology
self.index = index
## The Residue this Atom belongs to
self.residue = residue
def __eq__(self, other):
""" Check whether two Atom objects are equal. """
if self.name != other.name:
return False
if self.index != other.index:
return False
if self.element.name != other.element.name:
return False
if self.residue.name != other.residue.name:
return False
if self.residue.index != other.residue.index:
return False
if self.residue.chain.index != other.residue.chain.index:
return False
return True
def __hash__(self):
""" A quick comparison. """
return self.index
def __str__(self):
return '%s-%s' % (self.residue, self.name)
| lgpl-2.1 |
idlead/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for and remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Intialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain
            most of the input data by linear combinations.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    n_informative = min(n_features, n_informative)
    rng = check_random_state(random_state)

    if effective_rank is None:
        # Well conditioned design: i.i.d. standard normal entries.
        X = rng.randn(n_samples, n_features)
    else:
        # Low rank design with a fat singular-value tail.
        X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=rng)

    # Ground-truth model: only the first ``n_informative`` coefficients are
    # non-zero, so the remaining features carry no signal (useful to test
    # sparsifying regularizers such as L1 / elastic net).
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * rng.rand(n_informative, n_targets)

    y = np.dot(X, ground_truth) + bias

    if noise > 0.0:
        # Centered gaussian output noise.
        y += rng.normal(scale=noise, size=y.shape)

    if shuffle:
        # Permute the samples first, then the feature columns (and the
        # coefficient rows consistently with the columns).
        X, y = util_shuffle(X, y, random_state=rng)
        feature_order = np.arange(n_features)
        rng.shuffle(feature_order)
        X[:, :] = X[:, feature_order]
        ground_truth = ground_truth[feature_order]

    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(ground_truth)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    rng = check_random_state(random_state)

    # Sample n_samples // 2 evenly spaced angles; one extra point is drawn
    # and dropped so that 0 and 2*pi do not both appear.
    angles = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
    cos_a = np.cos(angles)
    sin_a = np.sin(angles)

    # Outer (unit) circle first, then the inner circle scaled by ``factor``.
    X = np.column_stack((np.concatenate((cos_a, factor * cos_a)),
                         np.concatenate((sin_a, factor * sin_a))))
    half = n_samples // 2
    y = np.concatenate((np.zeros(half, dtype=np.intp),
                        np.ones(half, dtype=np.intp)))

    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)

    if noise is not None:
        X += rng.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    # Outer moon: upper half circle. Inner moon: lower half circle,
    # mirrored and shifted so the two interleave.
    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # BUG FIX: X stacks the n_samples_out outer points first, then the
    # n_samples_in inner points, so y must repeat 0 n_samples_out times and
    # 1 n_samples_in times.  The previous code had the two counts swapped,
    # mislabelling the boundary sample whenever n_samples is odd.
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    rng = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly in the box.
        centers = rng.uniform(center_box[0], center_box[1],
                              size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    n_centers = centers.shape[0]
    if isinstance(cluster_std, numbers.Real):
        # Broadcast a scalar std to one value per center.
        cluster_std = np.ones(n_centers) * cluster_std

    # Split n_samples as evenly as possible; the first
    # ``n_samples % n_centers`` clusters receive one extra point.
    base, extra = divmod(n_samples, n_centers)
    counts = [base + (1 if i < extra else 0) for i in range(n_centers)]

    blocks = []
    for center_id, (count, std) in enumerate(zip(counts, cluster_std)):
        blocks.append(centers[center_id] +
                      rng.normal(scale=std, size=(count, n_features)))

    X = np.concatenate(blocks)
    y = np.repeat(np.arange(n_centers), counts)

    if shuffle:
        order = np.arange(n_samples)
        rng.shuffle(order)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`.

    The number of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features. Should be at least 5.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    # NOTE: the docstring previously used "\#1", an invalid escape sequence
    # that triggers a DeprecationWarning on Python >= 3.6; fixed above.
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)

    X = generator.rand(n_samples, n_features)
    # Only the first 5 columns enter the target.
    y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
        + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)

    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2]
               - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    # NOTE: the docstring previously used "\#2", an invalid escape sequence
    # that triggers a DeprecationWarning on Python >= 3.6; fixed above.
    generator = check_random_state(random_state)

    # Rescale the uniform draws onto the documented intervals.
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = (X[:, 0] ** 2
         + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
        + noise * generator.randn(n_samples)

    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3]))
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    # NOTE: the docstring previously used "\#3", an invalid escape sequence
    # that triggers a DeprecationWarning on Python >= 3.6; fixed above.
    generator = check_random_state(random_state)

    # Rescale the uniform draws onto the documented intervals.
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
        + noise * generator.randn(n_samples)

    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:

    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random (ortho normal) vectors
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = low_rank + tail

    # Scale the columns of ``u`` by the singular values directly instead of
    # materializing the dense (n, n) matrix ``np.identity(n) * s`` and
    # multiplying by it: same result, O(n) memory instead of O(n ** 2) and
    # one fewer matrix product.
    return np.dot(u * s, v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    n_components : int
        number of components in the dictionary

    n_features : int
        number of features of the dataset to generate

    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample

    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).

    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).

    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    rng = check_random_state(random_state)

    # Dictionary: i.i.d. Gaussian atoms, each column rescaled to unit l2 norm.
    D = rng.randn(n_features, n_components)
    D /= np.sqrt(np.sum(D ** 2, axis=0))

    # Sparse code: for every sample pick n_nonzero_coefs random rows and
    # fill them with Gaussian coefficients.
    X = np.zeros((n_components, n_samples))
    for col in range(n_samples):
        support = np.arange(n_components)
        rng.shuffle(support)
        X[support[:n_nonzero_coefs], col] = rng.randn(n_nonzero_coefs)

    # Encode the signal through the dictionary.
    Y = np.dot(D, X)

    # ``map`` is six.moves.map at module level, so callers receive the
    # squeezed arrays lazily (singleton dimensions removed).
    return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)

    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Targets: unit-variance Gaussian centered on a fixed sparse linear
    # combination of the first four features.
    target_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)

    # Start from the SVD basis of a random Gram matrix A.T A ...
    A = rng.rand(n_dim, n_dim)
    U, _, Vt = linalg.svd(np.dot(A.T, A))
    # ... then rebuild it with eigenvalues shifted into (1, 2), which
    # guarantees strict positive definiteness.
    spectrum = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, spectrum), Vt)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)

    # Build a sparse Cholesky factor: start from -I and add a sparse strict
    # lower triangle.  NOTE: the two masked assignments below are
    # order-sensitive -- the second mask ``aux > alpha`` is evaluated on the
    # already-zeroed array (entries exactly equal to alpha are left as-is,
    # a measure-zero event for continuous draws).
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    aux[aux < alpha] = 0
    # Surviving entries are rescaled uniformly into
    # [smallest_coef, largest_coef].
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strict lower triangle (k=-1 excludes the diagonal).
    aux = np.tril(aux, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol.T chol is symmetric positive definite by construction.
    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)

        # Two-sided scaling D^{-1/2} prec D^{-1/2} makes the diagonal all 1.
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)

    # Main manifold coordinate, uniform on [1.5*pi, 4.5*pi].
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    # Height along the roll axis, uniform on [0, 21).
    height = 21 * rng.rand(1, n_samples)

    # Roll the (t, height) sheet into 3d: x/z trace an Archimedean spiral.
    X = np.vstack((t * np.cos(t), height, t * np.sin(t)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)

    # Main manifold coordinate, uniform on [-1.5*pi, 1.5*pi].
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    # Depth along the flat axis, uniform on [0, 2).
    depth = 2.0 * rng.rand(1, n_samples)

    # The x/z pair traces an "S" shape as t sweeps its range.
    X = np.vstack((np.sin(t), depth, np.sign(t) * (np.cos(t) - 1)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.

    n_features : int, optional (default=2)
        The number of features for each sample.

    n_classes : int, optional (default=3)
        The number of classes

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    # NOTE: the docstring is now a raw string -- it previously contained
    # "\chi", an invalid escape sequence that triggers a DeprecationWarning
    # on Python >= 3.6.
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    if mean is None:
        mean = np.zeros(n_features)
    else:
        mean = np.array(mean)

    # Build multivariate normal distribution
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Sort by distance from origin
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]

    # Label by quantile: each class gets ``step`` consecutive samples; any
    # remainder is assigned to the outermost class.
    step = n_samples // n_classes

    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer
        The number of biclusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    # Cluster id of each row/column, e.g. [0, 0, 1, 1, 1, 2, ...].
    row_labels = np.repeat(np.arange(n_clusters), row_sizes)
    col_labels = np.repeat(np.arange(n_clusters), col_sizes)

    # Each bicluster is the outer product of its row and column indicator.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # NOTE: re-seeds from the user-supplied ``random_state`` argument
        # (not ``generator``), preserving the historical stream of values.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUG FIX: ``np.vstack`` was previously given a bare generator
    # expression, which is deprecated and raises a TypeError on modern
    # NumPy; pass list comprehensions instead.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))

    row_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_row_clusters), row_sizes)])
    col_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_col_clusters), col_sizes)])

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Fix: build the indicator stacks from lists, not bare generators.
    # ``np.vstack`` on a generator is deprecated and raises a TypeError in
    # recent NumPy releases.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| bsd-3-clause |
sburns/PyCap | redcap/project.py | 2 | 36294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""User facing class for interacting with a REDCap Project"""
import json
import warnings
import semantic_version
from .request import RCRequest, RedcapError, RequestException
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = "Scott Burns <scott.s.burnsgmail.com>"
__license__ = "MIT"
__copyright__ = "2014, Vanderbilt University"
# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=too-many-public-methods
# pylint: disable=redefined-builtin
class Project(object):
"""Main class for interacting with REDCap projects"""
    def __init__(self, url, token, name="", verify_ssl=True, lazy=False):
        """
        Parameters
        ----------
        url : str
            API URL to your REDCap server
        token : str
            API token to your project
        name : str, optional
            name for project
        verify_ssl : boolean, str
            Verify SSL, default True. Can pass path to CA_BUNDLE.
        lazy : boolean, optional (default False)
            When True, skip the API calls that populate the project
            attributes; call ``configure()`` later to fill them in.
        """
        self.token = token
        self.name = name
        self.url = url
        self.verify = verify_ssl
        # The attributes below stay None/False until configure() runs.
        self.metadata = None
        self.redcap_version = None
        self.field_names = None
        # We'll use the first field as the default id for each row
        self.def_field = None
        self.field_labels = None
        self.forms = None
        self.events = None
        self.arm_nums = None
        self.arm_names = None
        self.configured = False

        if not lazy:
            self.configure()
    def configure(self):
        """Fill in project attributes by querying the REDCap API.

        Exports the metadata, server version, events and arms, then marks
        the project as configured.

        Raises
        ------
        RedcapError
            When the metadata export or version determination fails.
        """
        try:
            self.metadata = self.__md()
        except RequestException as request_fail:
            raise RedcapError(
                "Exporting metadata failed. Check your URL and token."
            ) from request_fail
        try:
            self.redcap_version = self.__rcv()
        except Exception as general_fail:
            raise RedcapError(
                "Determination of REDCap version failed"
            ) from general_fail
        self.field_names = self.filter_metadata("field_name")
        # we'll use the first field as the default id for each row
        self.def_field = self.field_names[0]
        self.field_labels = self.filter_metadata("field_label")
        self.forms = tuple(set(c["form_name"] for c in self.metadata))
        # determine whether longitudinal
        ev_data = self._call_api(self.__basepl("event"), "exp_event")[0]
        arm_data = self._call_api(self.__basepl("arm"), "exp_arm")[0]
        # An "error" dict from the API is treated as "no events/arms",
        # i.e. the project is not longitudinal.
        if isinstance(ev_data, dict) and ("error" in ev_data.keys()):
            events = tuple([])
        else:
            events = ev_data
        if isinstance(arm_data, dict) and ("error" in arm_data.keys()):
            arm_nums = tuple([])
            arm_names = tuple([])
        else:
            arm_nums = tuple([a["arm_num"] for a in arm_data])
            arm_names = tuple([a["name"] for a in arm_data])
        self.events = events
        self.arm_nums = arm_nums
        self.arm_names = arm_names
        self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl("metadata")
p_l["content"] = "metadata"
return self._call_api(p_l, "metadata")[0]
def __basepl(self, content, rec_type="flat", format="json"):
"""Return a dictionary which can be used as is or added to for
payloads"""
payload_dict = {"token": self.token, "content": content, "format": format}
if content not in ["metapayload_dictata", "file"]:
payload_dict["type"] = rec_type
return payload_dict
    def __rcv(self):
        """Return the REDCap server version.

        Returns a ``semantic_version.Version`` when the reported string
        validates, the raw string otherwise, or ``''`` (with a warning)
        when the instance does not expose version information.
        """
        payload = self.__basepl("version")
        # The version endpoint returns raw bytes, not JSON.
        rcv = self._call_api(payload, "version")[0].decode("utf-8")
        if "error" in rcv:
            warnings.warn("Version information not available for this REDCap instance")
            return ""
        if semantic_version.validate(rcv):
            return semantic_version.Version(rcv)
        return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return (
len(self.events) > 0 and len(self.arm_nums) > 0 and len(self.arm_names) > 0
)
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {"verify": self.verify}
    def _call_api(self, payload, typpe, **kwargs):
        """Execute a REDCap API request.

        Parameters
        ----------
        payload : dict
            Form payload, usually built by ``__basepl``.
        typpe : str
            RCRequest request-type tag (e.g. ``'exp_record'``).
        **kwargs
            Extra keyword arguments forwarded to the HTTP library; they
            override the defaults from ``_kwargs()``.

        Returns
        -------
        tuple
            ``(response, headers)`` as returned by ``RCRequest.execute``.
        """
        request_kwargs = self._kwargs()
        request_kwargs.update(kwargs)
        rcr = RCRequest(self.url, payload, typpe)
        return rcr.execute(**request_kwargs)
    def export_fem(self, arms=None, format="json", df_kwargs=None):
        """
        Export the project's form to event mapping

        Parameters
        ----------
        arms : list
            Limit exported form event mappings to these arm numbers
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Return the form event mappings in native objects,
            csv or xml, ``'df'`` will return a ``pandas.DataFrame``
        df_kwargs : dict
            Passed to pandas.read_csv to control construction of
            returned DataFrame

        Returns
        -------
        fem : list, str, ``pandas.DataFrame``
            form-event mapping for the project
        """
        # 'df' is fetched as csv from the API and parsed locally by pandas.
        ret_format = format
        if format == "df":
            ret_format = "csv"
        payload = self.__basepl("formEventMapping", format=ret_format)
        if arms:
            # The API expects one indexed key per arm: arms[0], arms[1], ...
            for i, value in enumerate(arms):
                payload["arms[{}]".format(i)] = value
        response, _ = self._call_api(payload, "exp_fem")
        if format in ("json", "csv", "xml"):
            return response
        if format != "df":
            raise ValueError(("Unsupported format: '{}'").format(format))
        if not df_kwargs:
            df_kwargs = {}
        return self.read_csv(StringIO(response), **df_kwargs)
    def export_field_names(self, field=None, format="json", df_kwargs=None):
        """
        Export the project's export field names

        Parameters
        ----------
        field : str
            Limit exported field name to this field (only single field supported).
            When not provided, all fields returned.
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Return the metadata in native objects, csv or xml.
            ``'df'`` will return a ``pandas.DataFrame``.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of
            returned DataFrame.
            by default ``{'index_col': 'original_field_name'}``

        Returns
        -------
        metadata : list, str, ``pandas.DataFrame``
            metadata structure for the project.
        """
        # 'df' is fetched as csv from the API and parsed locally by pandas.
        ret_format = format
        if format == "df":
            ret_format = "csv"
        payload = self.__basepl("exportFieldNames", format=ret_format)
        if field:
            payload["field"] = field
        response, _ = self._call_api(payload, "exp_field_names")
        if format in ("json", "csv", "xml"):
            return response
        if format != "df":
            raise ValueError(("Unsupported format: '{}'").format(format))
        if not df_kwargs:
            df_kwargs = {"index_col": "original_field_name"}
        return self.read_csv(StringIO(response), **df_kwargs)
    def export_metadata(self, fields=None, forms=None, format="json", df_kwargs=None):
        """
        Export the project's metadata

        Parameters
        ----------
        fields : list
            Limit exported metadata to these fields
        forms : list
            Limit exported metadata to these forms
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Return the metadata in native objects, csv or xml.
            ``'df'`` will return a ``pandas.DataFrame``.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of
            returned DataFrame.
            by default ``{'index_col': 'field_name'}``

        Returns
        -------
        metadata : list, str, ``pandas.DataFrame``
            metadata structure for the project.
        """
        # 'df' is fetched as csv from the API and parsed locally by pandas.
        ret_format = format
        if format == "df":
            ret_format = "csv"
        payload = self.__basepl("metadata", format=ret_format)
        # Each filter list becomes indexed keys: fields[0], forms[0], ...
        to_add = [fields, forms]
        str_add = ["fields", "forms"]
        for key, data in zip(str_add, to_add):
            if data:
                for i, value in enumerate(data):
                    payload["{}[{}]".format(key, i)] = value
        response, _ = self._call_api(payload, "metadata")
        if format in ("json", "csv", "xml"):
            return response
        if format != "df":
            raise ValueError(("Unsupported format: '{}'").format(format))
        if not df_kwargs:
            df_kwargs = {"index_col": "field_name"}
        return self.read_csv(StringIO(response), **df_kwargs)
def delete_records(self, records):
"""
Delete records from the Project.
Parameters
----------
records : list
List of record IDs that you want to delete from the project
Returns
-------
response : int
Number of records deleted
"""
payload = dict()
payload["action"] = "delete"
payload["content"] = "record"
payload["token"] = self.token
# Turn list of records into dict, and append to payload
records_dict = {
"records[{}]".format(idx): record for idx, record in enumerate(records)
}
payload.update(records_dict)
payload["format"] = format
response, _ = self._call_api(payload, "del_record")
return response
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
    def export_records(
        self,
        records=None,
        fields=None,
        forms=None,
        events=None,
        raw_or_label="raw",
        event_name="label",
        format="json",
        export_survey_fields=False,
        export_data_access_groups=False,
        df_kwargs=None,
        export_checkbox_labels=False,
        filter_logic=None,
        date_begin=None,
        date_end=None,
    ):
        """
        Export data from the REDCap project.

        Parameters
        ----------
        records : list
            array of record names specifying specific records to export.
            by default, all records are exported
        fields : list
            array of field names specifying specific fields to pull
            by default, all fields are exported
        forms : list
            array of form names to export. If in the web UI, the form
            name has a space in it, replace the space with an underscore
            by default, all forms are exported
        events : list
            an array of unique event names from which to export records
            :note: this only applies to longitudinal projects
        raw_or_label : (``'raw'``), ``'label'``, ``'both'``
            export the raw coded values or labels for the options of
            multiple choice fields, or both
        event_name : (``'label'``), ``'unique'``
            export the unique event name or the event label
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Format of returned data. ``'json'`` returns json-decoded
            objects while ``'csv'`` and ``'xml'`` return other formats.
            ``'df'`` will attempt to return a ``pandas.DataFrame``.
        export_survey_fields : (``False``), True
            specifies whether or not to export the survey identifier
            field (e.g., "redcap_survey_identifier") or survey timestamp
            fields (e.g., form_name+"_timestamp") when surveys are
            utilized in the project.
        export_data_access_groups : (``False``), ``True``
            specifies whether or not to export the
            ``"redcap_data_access_group"`` field when data access groups
            are utilized in the project.
            :note: This flag is only viable if the user whose token is
                being used to make the API request is *not* in a data
                access group. If the user is in a group, then this flag
                will revert to its default value.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of
            returned DataFrame.
            by default, ``{'index_col': self.def_field}``
        export_checkbox_labels : (``False``), ``True``
            specify whether to export checkbox values as their label on
            export.
        filter_logic : string
            specify the filterLogic to be sent to the API.
        date_begin : datetime
            for the dateRangeStart filtering of the API
        date_end : datetime
            for the dateRangeEnd filtering sent to the API

        Returns
        -------
        data : list, str, ``pandas.DataFrame``
            exported data
        """
        # 'df' is fetched as csv from the API and parsed locally by pandas.
        ret_format = format
        if format == "df":
            ret_format = "csv"
        payload = self.__basepl("record", format=ret_format)
        # Ensure the default id field is requested so rows stay keyable.
        fields = self.backfill_fields(fields, forms)
        keys_to_add = (
            records,
            fields,
            forms,
            events,
            raw_or_label,
            event_name,
            export_survey_fields,
            export_data_access_groups,
            export_checkbox_labels,
        )
        str_keys = (
            "records",
            "fields",
            "forms",
            "events",
            "rawOrLabel",
            "eventName",
            "exportSurveyFields",
            "exportDataAccessGroups",
            "exportCheckboxLabel",
        )
        for key, data in zip(str_keys, keys_to_add):
            if data:
                # List-valued parameters become indexed keys: fields[0], ...
                if key in ("fields", "records", "forms", "events"):
                    for i, value in enumerate(data):
                        payload["{}[{}]".format(key, i)] = value
                else:
                    payload[key] = data
        if date_begin:
            payload["dateRangeBegin"] = date_begin.strftime("%Y-%m-%d %H:%M:%S")
        if date_end:
            payload["dateRangeEnd"] = date_end.strftime("%Y-%m-%d %H:%M:%S")
        if filter_logic:
            payload["filterLogic"] = filter_logic
        response, _ = self._call_api(payload, "exp_record")
        if format in ("json", "csv", "xml"):
            return response
        if format != "df":
            raise ValueError(("Unsupported format: '{}'").format(format))
        if not df_kwargs:
            # Longitudinal exports carry an event column; index on it too.
            if self.is_longitudinal():
                df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
            else:
                df_kwargs = {"index_col": self.def_field}
        buf = StringIO(response)
        dataframe = self.read_csv(buf, **df_kwargs)
        buf.close()
        return dataframe
# pylint: enable=too-many-branches
# pylint: enable=too-many-locals
# pylint: disable=import-outside-toplevel
@staticmethod
def read_csv(buf, **df_kwargs):
"""Wrapper around pandas read_csv that handles EmptyDataError"""
from pandas import DataFrame, read_csv
from pandas.errors import EmptyDataError
try:
dataframe = read_csv(buf, **df_kwargs)
except EmptyDataError:
dataframe = DataFrame()
return dataframe
# pylint: enable=import-outside-toplevel
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(
field_name, "text_validation_type_or_show_slider_number"
)
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
metadata_field = ""
try:
metadata_field = str(
[f[key] for f in self.metadata if f["field_name"] == field][0]
)
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return metadata_field
else:
return metadata_field
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatiblity for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields, forms
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print("%s --> %s" % (str(name), str(label)))
return self.field_names, self.field_labels
    def import_records(
        self,
        to_import,
        overwrite="normal",
        format="json",
        return_format="json",
        return_content="count",
        date_format="YMD",
        force_auto_number=False,
    ):
        """
        Import data into the RedCap Project

        Parameters
        ----------
        to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
            :note:
                If you pass a csv or xml string, you should use the
                ``format`` parameter appropriately.
            :note:
                Keys of the dictionaries should be subset of project's,
                fields, but this isn't a requirement. If you provide keys
                that aren't defined fields, the returned response will
                contain an ``'error'`` key.
        overwrite : ('normal'), 'overwrite'
            ``'overwrite'`` will erase values previously stored in the
            database if not specified in the to_import dictionaries.
        format : ('json'), 'xml', 'csv'
            Format of incoming data. By default, to_import will be json-encoded
        return_format : ('json'), 'csv', 'xml'
            Response format. By default, response will be json-decoded.
        return_content : ('count'), 'ids', 'nothing'
            By default, the response contains a 'count' key with the number of
            records just imported. By specifying 'ids', a list of ids
            imported will be returned. 'nothing' will only return
            the HTTP status code and no message.
        date_format : ('YMD'), 'DMY', 'MDY'
            Describes the formatting of dates. By default, date strings
            are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
            strings are formatted as 'MM/DD/YYYY' set this parameter as
            'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
            other formattings are allowed.
        force_auto_number : ('False') Enables automatic assignment of record IDs
            of imported records by REDCap. If this is set to true, and auto-numbering
            for records is enabled for the project, auto-numbering of imported records
            will be enabled.

        Returns
        -------
        response : dict, str
            response from REDCap API, json-decoded if ``return_format`` == ``'json'``

        Raises
        ------
        RedcapError
            When the API response contains an ``'error'`` key.
        """
        payload = self._initialize_import_payload(to_import, format, "record")
        payload["overwriteBehavior"] = overwrite
        payload["returnFormat"] = return_format
        payload["returnContent"] = return_content
        payload["dateFormat"] = date_format
        payload["forceAutoNumber"] = force_auto_number
        response = self._call_api(payload, "imp_record")[0]
        if "error" in response:
            raise RedcapError(str(response))
        return response
    def import_metadata(
        self, to_import, format="json", return_format="json", date_format="YMD"
    ):
        """
        Import metadata (DataDict) into the RedCap Project

        Parameters
        ----------
        to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
            :note:
                If you pass a csv or xml string, you should use the
                ``format`` parameter appropriately.
        format : ('json'), 'xml', 'csv'
            Format of incoming data. By default, to_import will be json-encoded
        return_format : ('json'), 'csv', 'xml'
            Response format. By default, response will be json-decoded.
        date_format : ('YMD'), 'DMY', 'MDY'
            Describes the formatting of dates. By default, date strings
            are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
            strings are formatted as 'MM/DD/YYYY' set this parameter as
            'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
            other formattings are allowed.

        Returns
        -------
        response : dict, str
            response from REDCap API, json-decoded if ``return_format`` == ``'json'``
            If successful, the number of imported fields

        Raises
        ------
        RedcapError
            When the (stringified) API response contains 'error'.
        """
        payload = self._initialize_import_payload(to_import, format, "metadata")
        payload["returnFormat"] = return_format
        payload["dateFormat"] = date_format
        response = self._call_api(payload, "imp_metadata")[0]
        if "error" in str(response):
            raise RedcapError(str(response))
        return response
def _initialize_import_payload(self, to_import, format, data_type):
"""
Standardize the data to be imported and add it to the payload
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
data_type: 'record', 'metadata'
The kind of data that are imported
Returns
-------
payload : (dict, str)
The initialized payload dictionary and updated format
"""
payload = self.__basepl(data_type)
# pylint: disable=comparison-with-callable
if hasattr(to_import, "to_csv"):
# We'll assume it's a df
buf = StringIO()
if data_type == "record":
if self.is_longitudinal():
csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]}
else:
csv_kwargs = {"index_label": self.def_field}
elif data_type == "metadata":
csv_kwargs = {"index": False}
to_import.to_csv(buf, **csv_kwargs)
payload["data"] = buf.getvalue()
buf.close()
format = "csv"
elif format == "json":
payload["data"] = json.dumps(to_import, separators=(",", ":"))
else:
# don't do anything to csv/xml
payload["data"] = to_import
# pylint: enable=comparison-with-callable
payload["format"] = format
return payload
    def export_file(self, record, field, event=None, return_format="json"):
        """
        Export the contents of a file stored for a particular record

        Notes
        -----
        Unlike other export methods, this works on a single record.

        Parameters
        ----------
        record : str
            record ID
        field : str
            field name containing the file to be exported.
        event: str
            for longitudinal projects, specify the unique event here
        return_format: ('json'), 'csv', 'xml'
            format of error message

        Returns
        -------
        content : bytes
            content of the file
        content_map : dict
            content-type dictionary

        Raises
        ------
        ValueError
            When ``field`` does not exist or is not a 'file' field.
        """
        self._check_file_field(field)
        # load up payload
        payload = self.__basepl(content="file", format=return_format)
        # there's no format field in this call
        del payload["format"]
        payload["returnFormat"] = return_format
        payload["action"] = "export"
        payload["field"] = field
        payload["record"] = record
        if event:
            payload["event"] = event
        content, headers = self._call_api(payload, "exp_file")
        # REDCap adds some useful things in content-type
        # (e.g. name="..."; charset=...), parsed here into a dict.
        if "content-type" in headers:
            splat = [
                key_values.strip() for key_values in headers["content-type"].split(";")
            ]
            key_values = [
                (key_values.split("=")[0], key_values.split("=")[1].replace('"', ""))
                for key_values in splat
                if "=" in key_values
            ]
            content_map = dict(key_values)
        else:
            content_map = {}
        return content, content_map
    def import_file(
        self,
        record,
        field,
        fname,
        fobj,
        event=None,
        repeat_instance=None,
        return_format="json",
    ):
        """
        Import the contents of a file represented by fobj to a
        particular records field

        Parameters
        ----------
        record : str
            record ID
        field : str
            field name where the file will go
        fname : str
            file name visible in REDCap UI
        fobj : file object
            file object as returned by `open`
        event : str
            for longitudinal projects, specify the unique event here
        repeat_instance : int
            (only for projects with repeating instruments/events)
            The repeat instance number of the repeating event (if longitudinal)
            or the repeating instrument (if classic or longitudinal).
        return_format : ('json'), 'csv', 'xml'
            format of error message

        Returns
        -------
        response :
            response from server as specified by ``return_format``

        Raises
        ------
        ValueError
            When ``field`` does not exist or is not a 'file' field.
        """
        self._check_file_field(field)
        # load up payload
        payload = self.__basepl(content="file", format=return_format)
        # no format in this call
        del payload["format"]
        payload["returnFormat"] = return_format
        payload["action"] = "import"
        payload["field"] = field
        payload["record"] = record
        if event:
            payload["event"] = event
        if repeat_instance:
            payload["repeat_instance"] = repeat_instance
        # The file itself goes out as a multipart upload, not in the payload.
        file_kwargs = {"files": {"file": (fname, fobj)}}
        return self._call_api(payload, "imp_file", **file_kwargs)[0]
    def delete_file(self, record, field, return_format="json", event=None):
        """
        Delete a file from REDCap

        Notes
        -----
        There is no undo button to this.

        Parameters
        ----------
        record : str
            record ID
        field : str
            field name
        return_format : (``'json'``), ``'csv'``, ``'xml'``
            return format for error message
        event : str
            If longitudinal project, event to delete file from

        Returns
        -------
        response : dict, str
            response from REDCap after deleting file

        Raises
        ------
        ValueError
            When ``field`` does not exist or is not a 'file' field.
        """
        self._check_file_field(field)
        # Load up payload
        payload = self.__basepl(content="file", format=return_format)
        # there's no format field in this call
        del payload["format"]
        payload["returnFormat"] = return_format
        payload["action"] = "delete"
        payload["record"] = record
        payload["field"] = field
        if event:
            payload["event"] = event
        return self._call_api(payload, "del_file")[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, "field_type") == "file"
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
return True
    def export_users(self, format="json"):
        """
        Export the users of the Project

        Notes
        -----
        Each user will have the following keys:

            * ``'firstname'`` : User's first name
            * ``'lastname'`` : User's last name
            * ``'email'`` : Email address
            * ``'username'`` : User's username
            * ``'expiration'`` : Project access expiration date
            * ``'data_access_group'`` : data access group ID
            * ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
            * ``'forms'`` : a list of dicts with a single key as the form name and
                value is an integer describing that user's form rights,
                where: 0=no access, 1=view records/responses and edit
                records (survey responses are read-only), 2=read only, and
                3=edit survey responses,

        Parameters
        ----------
        format : (``'json'``), ``'csv'``, ``'xml'``
            response return format

        Returns
        -------
        users: list, str
            list of users dicts when ``'format'='json'``,
            otherwise a string
        """
        payload = self.__basepl(content="user", format=format)
        return self._call_api(payload, "exp_user")[0]
    def export_survey_participant_list(self, instrument, event=None, format="json"):
        """
        Export the Survey Participant List

        Notes
        -----
        The passed instrument must be set up as a survey instrument.

        Parameters
        ----------
        instrument: str
            Name of instrument as seen in second column of Data Dictionary.
        event: str
            Unique event name, only used in longitudinal projects
        format: (json, xml, csv), json by default
            Format of returned data

        Returns
        -------
        tuple
            ``(response, headers)`` — note that unlike most export
            methods, the raw headers are returned alongside the data.
        """
        payload = self.__basepl(content="participantList", format=format)
        payload["instrument"] = instrument
        if event:
            payload["event"] = event
        return self._call_api(payload, "exp_survey_participant_list")
    def generate_next_record_name(self):
        """Return the next record name for auto-numbering records.

        Queries the ``generateNextRecordName`` API endpoint and returns
        the response body only.
        """
        payload = self.__basepl(content="generateNextRecordName")
        return self._call_api(payload, "exp_next_id")[0]
    def export_project_info(self, format="json"):
        """
        Export Project Information

        Parameters
        ----------
        format: (json, xml, csv), json by default
            Format of returned data

        Returns
        -------
        response : dict, str
            project information, json-decoded when ``format='json'``
        """
        payload = self.__basepl(content="project", format=format)
        return self._call_api(payload, "exp_proj")[0]
# pylint: disable=too-many-locals
    def export_reports(
        self,
        format="json",
        report_id=None,
        raw_or_label="raw",
        raw_or_label_headers="raw",
        export_checkbox_labels="false",
        decimal_character=None,
        df_kwargs=None,
    ):
        """
        Export a report of the Project

        Parameters
        ----------
        report_id : the report ID number provided next to the report name
            on the report list page
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Format of returned data. ``'json'`` returns json-decoded
            objects while ``'csv'`` and ``'xml'`` return other formats.
            ``'df'`` will attempt to return a ``pandas.DataFrame``.
        raw_or_label : raw [default], label - export the raw coded values or
            labels for the options of multiple choice fields
        raw_or_label_headers : raw [default], label - (for 'csv' format 'flat'
            type only) for the CSV headers, export the variable/field names
            (raw) or the field labels (label)
        export_checkbox_labels : true, false [default] - specifies the format of
            checkbox field values specifically when exporting the data as labels
            (i.e., when rawOrLabel=label). When exporting labels, by default
            (without providing the exportCheckboxLabel flag or if
            exportCheckboxLabel=false), all checkboxes will either have a value
            'Checked' if they are checked or 'Unchecked' if not checked.
            But if exportCheckboxLabel is set to true, it will instead export
            the checkbox value as the checkbox option's label (e.g., 'Choice 1')
            if checked or it will be blank/empty (no value) if not checked.
            If rawOrLabel=false, then the exportCheckboxLabel flag is ignored.
        decimal_character : If specified, force all numbers into same decimal
            format. You may choose to force all data values containing a
            decimal to have the same decimal character, which will be applied
            to all calc fields and number-validated text fields. Options
            include comma ',' or dot/full stop '.', but if left blank/null,
            then it will export numbers using the fields' native decimal format.
            Simply provide the value of either ',' or '.' for this parameter.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of the
            returned DataFrame (only used when ``format='df'``).

        Returns
        -------
        Per Redcap API:
        Data from the project in the format and type specified
        Ordered by the record (primary key of project) and then by event id
        """
        # 'df' is fetched as csv from the API and parsed locally by pandas.
        ret_format = format
        if format == "df":
            ret_format = "csv"
        payload = self.__basepl(content="report", format=ret_format)
        keys_to_add = (
            report_id,
            raw_or_label,
            raw_or_label_headers,
            export_checkbox_labels,
            decimal_character,
        )
        str_keys = (
            "report_id",
            "rawOrLabel",
            "rawOrLabelHeaders",
            "exportCheckboxLabel",
            "decimalCharacter",
        )
        for key, data in zip(str_keys, keys_to_add):
            if data:
                payload[key] = data
        response, _ = self._call_api(payload, "exp_report")
        if format in ("json", "csv", "xml"):
            return response
        if format != "df":
            raise ValueError(("Unsupported format: '{}'").format(format))
        if not df_kwargs:
            # Longitudinal exports carry an event column; index on it too.
            if self.is_longitudinal():
                df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
            else:
                df_kwargs = {"index_col": self.def_field}
        buf = StringIO(response)
        dataframe = self.read_csv(buf, **df_kwargs)
        buf.close()
        return dataframe
# pylint: enable=too-many-locals
# pylint: enable=too-many-instance-attributes
# pylint: enable=too-many-arguments
# pylint: enable=too-many-public-methods
# pylint: enable=redefined-builtin
| mit |
droythorne/folium | folium/folium.py | 4 | 50182 | # -*- coding: utf-8 -*-
"""
Folium
-------
Make beautiful, interactive maps with Python and Leaflet.js
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import codecs
import functools
import json
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from pkg_resources import resource_string
from folium import utilities
from folium.six import text_type, binary_type, iteritems
import sys
import base64
ENV = Environment(loader=PackageLoader('folium', 'templates'))
def initialize_notebook():
    """Initialize the IPython notebook display elements.

    Renders folium's css/js bootstrap templates into the notebook.  If
    IPython is not installed, a message is printed and nothing is done.
    """
    try:
        from IPython.core.display import display, HTML
    except ImportError:
        # Fix: without this early return, execution fell through to the
        # display(...) calls below and raised NameError, since the names
        # were never imported.
        print("IPython Notebook could not be loaded.")
        return

    lib_css = ENV.get_template('ipynb_init_css.html')
    lib_js = ENV.get_template('ipynb_init_js.html')
    leaflet_dvf = ENV.get_template('leaflet-dvf.markers.min.js')

    display(HTML(lib_css.render()))
    display(HTML(lib_js.render({'leaflet_dvf': leaflet_dvf.render()})))
def iter_obj(type):
    """Decorator to keep count of different map object types in self.mk_cnt."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Bump the per-type counter before delegating to the wrapped call.
            current = self.mark_cnt.get(type, 0)
            self.mark_cnt[type] = current + 1
            return func(self, *args, **kwargs)
        return wrapper
    return decorator
class Map(object):
"""Create a Map with Folium."""
def __init__(self, location=None, width='100%', height='100%',
             tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
             zoom_start=10, attr=None, min_lat=-90, max_lat=90,
             min_lon=-180, max_lon=180):
    """Create a Map with Folium and Leaflet.js

    Generate a base map of given width and height with either default
    tilesets or a custom tileset URL. The following tilesets are built-in
    to Folium. Pass any of the following to the "tiles" keyword:
        - "OpenStreetMap"
        - "MapQuest Open"
        - "MapQuest Open Aerial"
        - "Mapbox Bright" (Limited levels of zoom for free tiles)
        - "Mapbox Control Room" (Limited levels of zoom for free tiles)
        - "Stamen" (Terrain, Toner, and Watercolor)
        - "Cloudmade" (Must pass API key)
        - "Mapbox" (Must pass API key)
        - "CartoDB" (positron and dark_matter)

    You can pass a custom tileset to Folium by passing a Leaflet-style
    URL to the tiles parameter:
    http://{s}.yourtiles.com/{z}/{x}/{y}.png

    Parameters
    ----------
    location: tuple or list, default None
        Latitude and Longitude of Map (Northing, Easting).
    width: pixel int or percentage string (default: '100%')
        Width of the map.
    height: pixel int or percentage string (default: '100%')
        Height of the map.
    tiles: str, default 'OpenStreetMap'
        Map tileset to use. Can use defaults or pass a custom URL.
    API_key: str, default None
        API key for Cloudmade or Mapbox tiles.
    max_zoom: int, default 18
        Maximum zoom depth for the map.
    zoom_start: int, default 10
        Initial zoom level for the map.
    attr: string, default None
        Map tile attribution; only required if passing custom tile URL.

    Returns
    -------
    Folium Map Object

    Examples
    --------
    >>>map = folium.Map(location=[45.523, -122.675], width=750, height=500)
    >>>map = folium.Map(location=[45.523, -122.675],
                        tiles='Mapbox Control Room')
    >>>map = folium.Map(location=(45.523, -122.675), max_zoom=20,
                        tiles='Cloudmade', API_key='YourKey')
    >>>map = folium.Map(location=[45.523, -122.675], zoom_start=2,
                        tiles=('http://{s}.tiles.mapbox.com/v3/'
                               'mapbox.control-room/{z}/{x}/{y}.png'),
                        attr='Mapbox attribution')
    """
    # Inits.
    self.map_path = None          # output HTML path; set later by create_map()
    self.render_iframe = False    # toggled externally to force iframe repr
    self.map_type = 'base'        # becomes 'geojson' once geo_json() is used
    self.map_id = '_'.join(['folium', uuid4().hex])

    # Mark counter, JSON, Plugins.
    self.mark_cnt = {}   # per-object-type call counters (see iter_obj)
    self.json_data = {}  # data written out alongside the HTML by create_map
    self.plugins = {}    # plugin name -> JS payload, written by create_map

    # No location means we will use automatic bounds and ignore zoom
    self.location = location

    # If location is not passed, we center the map at 0,0
    if not location:
        location = [0, 0]
        zoom_start = min_zoom

    # Map Size Parameters.
    # Accept either a positive pixel int or a 'NN%' percentage string.
    try:
        if isinstance(width, int):
            width_type = 'px'
            assert width > 0
        else:
            width_type = '%'
            width = int(width.strip('%'))
            assert 0 <= width <= 100
    except:
        # Any parse/assert failure is surfaced uniformly as a ValueError.
        msg = "Cannot parse width {!r} as {!r}".format
        raise ValueError(msg(width, width_type))
    self.width = width

    try:
        if isinstance(height, int):
            height_type = 'px'
            assert height > 0
        else:
            height_type = '%'
            height = int(height.strip('%'))
            assert 0 <= height <= 100
    except:
        msg = "Cannot parse height {!r} as {!r}".format
        raise ValueError(msg(height, height_type))
    self.height = height

    self.map_size = {'width': width, 'height': height}
    self._size = ('style="width: {0}{1}; height: {2}{3}"'
                  .format(width, width_type, height, height_type))

    # Templates.
    self.env = ENV
    self.template_vars = dict(lat=location[0],
                              lon=location[1],
                              size=self._size,
                              max_zoom=max_zoom,
                              zoom_level=zoom_start,
                              map_id=self.map_id,
                              min_zoom=min_zoom,
                              min_lat=min_lat,
                              max_lat=max_lat,
                              min_lon=min_lon,
                              max_lon=max_lon)

    # Tiles.
    # Normalize e.g. "Mapbox Control Room" -> "mapboxcontrolroom".
    self.tiles = ''.join(tiles.lower().strip().split())
    if self.tiles in ('cloudmade', 'mapbox') and not API_key:
        raise ValueError('You must pass an API key if using Cloudmade'
                         ' or non-default Mapbox tiles.')
    self.default_tiles = ['openstreetmap', 'mapboxcontrolroom',
                          'mapquestopen', 'mapquestopenaerial',
                          'mapboxbright', 'mapbox', 'cloudmade',
                          'stamenterrain', 'stamentoner',
                          'stamenwatercolor',
                          'cartodbpositron', 'cartodbdark_matter']
    self.tile_types = {}
    for tile in self.default_tiles:
        tile_path = 'tiles/%s' % tile
        self.tile_types[tile] = {
            'templ': self.env.get_template('%s/%s' % (tile_path,
                                                      'tiles.txt')),
            'attr': self.env.get_template('%s/%s' % (tile_path,
                                                     'attr.txt')),
        }

    if self.tiles in self.tile_types:
        # Built-in tileset: render its URL template and attribution.
        self.template_vars['Tiles'] = (self.tile_types[self.tiles]['templ']
                                       .render(API_key=API_key))
        self.template_vars['attr'] = (self.tile_types[self.tiles]['attr']
                                      .render())
    else:
        # Custom tile URL: attribution is mandatory.
        self.template_vars['Tiles'] = tiles
        if not attr:
            raise ValueError('Custom tiles must'
                             ' also be passed an attribution')
        if isinstance(attr, binary_type):
            attr = text_type(attr, 'utf8')
        self.template_vars['attr'] = attr
        self.tile_types.update({'Custom': {'template': tiles,
                                           'attr': attr}})

    self.added_layers = []
    self.template_vars.setdefault('wms_layers', [])
    self.template_vars.setdefault('tile_layers', [])
    self.template_vars.setdefault('image_layers', [])
@iter_obj('simple')
def add_tile_layer(self, tile_name=None, tile_url=None, active=False):
    """Adds a simple tile layer.

    Parameters
    ----------
    tile_name: string
        name of the tile layer
    tile_url: string
        url location of the tile layer
    active: boolean
        should the layer be active when added
    """
    # Skip layers that have already been registered.
    if tile_name in self.added_layers:
        return
    tile_name = tile_name.replace(" ", "_")
    rendered = self.env.get_template('tile_layer.js').render(
        {'tile_name': tile_name,
         'tile_url': tile_url})
    self.template_vars.setdefault('tile_layers', []).append(rendered)
    self.added_layers.append({tile_name: tile_url})
@iter_obj('simple')
def add_wms_layer(self, wms_name=None, wms_url=None, wms_format=None,
                  wms_layers=None, wms_transparent=True):
    """Adds a WMS (Web Map Service) layer to the map.

    Parameters
    ----------
    wms_name: string
        name of wms layer
    wms_url : string
        url of wms layer
    wms_format: string, default None
        Image format requested from the WMS service (e.g. 'image/png').
    wms_layers: string, default None
        WMS layer name(s) to request from the service.
    wms_transparent: boolean, default True
        Whether the WMS tiles are requested with transparency.
    """
    # NOTE(review): added_layers holds {name: url} dicts, so this
    # membership test never matches a previously-added name — dedup is
    # effectively a no-op; preserved as-is to avoid a behavior change.
    if wms_name not in self.added_layers:
        wms_name = wms_name.replace(" ", "_")
        wms_temp = self.env.get_template('wms_layer.js')
        wms = wms_temp.render({
            'wms_name': wms_name,
            'wms_url': wms_url,
            'wms_format': wms_format,
            'wms_layer_names': wms_layers,
            'wms_transparent': str(wms_transparent).lower()})
        self.template_vars.setdefault('wms_layers', []).append((wms))
        self.added_layers.append({wms_name: wms_url})
@iter_obj('simple')
def add_layers_to_map(self):
    """
    Required function to actually add the layers to the HTML packet.

    Builds a JS object-literal body ('"name": name' pairs, comma-separated)
    from ``self.added_layers`` and renders it through 'add_layers.js' into
    ``template_vars['data_layers']``.
    """
    layers_temp = self.env.get_template('add_layers.js')
    data_string = ''
    for i, layer in enumerate(self.added_layers):
        # NOTE(review): assumes each entry is a {name: url} dict, as added
        # by add_tile_layer/add_wms_layer (image_overlay appends plain
        # strings) — confirm before mixing layer kinds.
        name = list(layer.keys())[0]
        # Comma-separate all entries; newline-terminate the last one.
        # (Original referenced term_string before assignment on the last
        # entry, and dropped it from the format call entirely: the format
        # string had two placeholders but was given three arguments.)
        if i < len(self.added_layers) - 1:
            term_string = ",\n"
        else:
            term_string = "\n"
        data_string += '\"{}\": {}{}'.format(name, name, term_string)
    data_layers = layers_temp.render({'layers': data_string})
    self.template_vars.setdefault('data_layers', []).append((data_layers))
@iter_obj('simple')
def simple_marker(self, location=None, popup=None,
                  marker_color='blue', marker_icon='info-sign',
                  clustered_marker=False, icon_angle=0, popup_width=300):
    """Create a simple stock Leaflet marker on the map, with optional
    popup text or Vincent visualization.

    Parameters
    ----------
    location: tuple or list, default None
        Latitude and Longitude of Marker (Northing, Easting)
    popup: string or tuple, default 'Pop Text'
        Input text or visualization for object. Can pass either text,
        or a tuple of the form (Vincent object, 'vis_path.json')
        It is possible to adjust the width of text/HTML popups
        using the optional keywords `popup_width` (default is 300px).
    marker_color
        color of marker you want
    marker_icon
        icon from (http://getbootstrap.com/components/) you want on the
        marker
    clustered_marker
        boolean of whether or not you want the marker clustered with
        other markers

    Returns
    -------
    Marker names and HTML in obj.template_vars

    Example
    -------
    >>>map.simple_marker(location=[45.5, -122.3], popup='Portland, OR')
    >>>map.simple_marker(location=[45.5, -122.3], popup=(vis, 'vis.json'))
    """
    count = self.mark_cnt['simple']
    marker_var = 'marker_{0}'.format(count)
    icon_var = marker_var + '_icon'

    # Render the icon definition for this marker.
    icon_js = self.env.get_template('simple_icon.js').render(
        {'icon': marker_icon,
         'icon_name': icon_var,
         'markerColor': marker_color,
         'icon_angle': icon_angle})

    # Render the marker itself, wired up to the icon above.
    marker_js = self.env.get_template('simple_marker.js').render(
        {'marker': 'marker_' + str(count),
         'lat': location[0],
         'lon': location[1],
         'icon': "{'icon':" + icon_var + "}"})

    popup_js = self._popup_render(popup=popup, mk_name='marker_',
                                  count=count, width=popup_width)

    # Clustered markers go into the cluster layer; plain ones onto the map.
    if clustered_marker:
        target = 'cluster_markers'
        add_js = 'clusteredmarkers.addLayer(marker_{0})'.format(count)
    else:
        target = 'custom_markers'
        add_js = 'map.addLayer(marker_{0})'.format(count)

    self.template_vars.setdefault(target, []).append(
        (icon_js, marker_js, popup_js, add_js))
@iter_obj('div_mark')
def div_markers(self, locations=None, popups=None,
                marker_size=10, popup_width=300):
    """Create a simple div marker on the map, with optional
    popup text or Vincent visualization. Useful for marking points along a
    line.

    Parameters
    ----------
    locations: list of locations, where each location is an array
        Latitude and Longitude of Marker (Northing, Easting)
    popup: list of popups, each popup should be a string or tuple.
        Default 'Pop Text'
        Input text or visualization for object. Can pass either text,
        or a tuple of the form (Vincent object, 'vis_path.json')
        It is possible to adjust the width of text/HTML popups
        using the optional keywords `popup_width`.
        (Leaflet default is 300px.)
    marker_size
        default is 5

    Returns
    -------
    Marker names and HTML in obj.template_vars

    Example
    -------
    >>> map.div_markers(locations=[[37.421114, -122.128314],
    ...                            [37.391637, -122.085416],
    ...                            [37.388832, -122.087709]],
    ...                 popups=['1437494575531',
    ...                         '1437492135937',
    ...                         '1437493590434'])
    """
    call_cnt = self.mark_cnt['div_mark']
    if locations is None or popups is None:
        raise RuntimeError("Both locations and popups are mandatory")

    # Template lookups are loop-invariant, so resolve them once.
    icon_tmpl = self.env.get_template('static_div_icon.js')
    marker_tmpl = self.env.get_template('simple_marker.js')

    for point_cnt, (location, popup) in enumerate(zip(locations, popups)):
        marker_var = 'div_marker_{0}_{1}'.format(call_cnt, point_cnt)
        icon_var = marker_var + '_icon'

        icon_js = icon_tmpl.render({'icon_name': icon_var,
                                    'size': marker_size})
        marker_js = marker_tmpl.render(
            {'marker': marker_var,
             'lat': location[0],
             'lon': location[1],
             'icon': "{'icon':" + icon_var + "}"})
        popup_js = self._popup_render(
            popup=popup,
            mk_name='div_marker_{0}_'.format(call_cnt),
            count=point_cnt, width=popup_width)
        add_js = 'map.addLayer({0})'.format(marker_var)

        self.template_vars.setdefault('div_markers', []).append(
            (icon_js, marker_js, popup_js, add_js))
@iter_obj('line')
def line(self, locations,
         line_color=None, line_opacity=None, line_weight=None,
         popup=None, popup_width=300):
    """Add a line to the map with optional styles.

    Parameters
    ----------
    locations: list of points (latitude, longitude)
        Latitude and Longitude of line (Northing, Easting)
    line_color: string, default Leaflet's default ('#03f')
    line_opacity: float, default Leaflet's default (0.5)
    line_weight: float, default Leaflet's default (5)
    popup: string or tuple, default 'Pop Text'
        Input text or visualization for object. Can pass either text,
        or a tuple of the form (Vincent object, 'vis_path.json')
        It is possible to adjust the width of text/HTML popups
        using the optional keywords `popup_width` (default is 300px).

    Note: If the optional styles are omitted, they will not be included
    in the HTML output and will obtain the Leaflet defaults listed above.

    Example
    -------
    >>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)])
    >>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)],
                line_color='red', line_opacity=1.0)
    """
    count = self.mark_cnt['line']
    varname = 'line_{}'.format(count)

    style = {'color': line_color,
             'weight': line_weight,
             'opacity': line_opacity}
    rendered = self.env.get_template('polyline.js').render(
        {'line': varname,
         'locations': locations,
         'options': style})

    popup_js = self._popup_render(popup=popup, mk_name='line_',
                                  count=count, width=popup_width)
    add_js = 'map.addLayer({});'.format(varname)

    self.template_vars.setdefault('lines', []).append(
        (rendered, popup_js, add_js))
@iter_obj('multiline')
def multiline(self, locations, line_color=None, line_opacity=None,
              line_weight=None):
    """Add a multiPolyline to the map with optional styles.

    A multiPolyline is single layer that consists of several polylines
    that share styling/popup.

    Parameters
    ----------
    locations: list of lists of points (latitude, longitude)
        Latitude and Longitude of line (Northing, Easting)
    line_color: string, default Leaflet's default ('#03f')
    line_opacity: float, default Leaflet's default (0.5)
    line_weight: float, default Leaflet's default (5)

    Note: If the optional styles are omitted, they will not be included
    in the HTML output and will obtain the Leaflet defaults listed above.

    Example
    -------
    >>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
                               [(45.5237, -122.675), (45.5237, -122.675)],
                               [(45.5238, -122.675), (45.5238, -122.675)]])
    >>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
                               [(45.5237, -122.675), (45.5237, -122.675)],
                               [(45.5238, -122.675), (45.5238, -122.675)]],
                    line_color='red', line_weight=2,
                    line_opacity=1.0)
    """
    count = self.mark_cnt['multiline']
    varname = 'multiline_{}'.format(count)

    style_opts = {'color': line_color,
                  'weight': line_weight,
                  'opacity': line_opacity}
    rendered = self.env.get_template('multi_polyline.js').render(
        {'multiline': varname,
         'locations': locations,
         'options': style_opts})

    self.template_vars.setdefault('multilines', []).append(
        (rendered, 'map.addLayer({});'.format(varname)))
@iter_obj('circle')
def circle_marker(self, location=None, radius=500, popup=None,
                  line_color='black', fill_color='black',
                  fill_opacity=0.6, popup_width=300):
    """Create a simple circle marker on the map, with optional popup text
    or Vincent visualization.

    Parameters
    ----------
    location: tuple or list, default None
        Latitude and Longitude of Marker (Northing, Easting)
    radius: int, default 500
        Circle radius, in pixels
    popup: string or tuple, default 'Pop Text'
        Input text or visualization for object. Can pass either text,
        or a tuple of the form (Vincent object, 'vis_path.json')
        It is possible to adjust the width of text/HTML popups
        using the optional keywords `popup_width` (default is 300px).
    line_color: string, default black
        Line color. Can pass hex value here as well.
    fill_color: string, default black
        Fill color. Can pass hex value here as well.
    fill_opacity: float, default 0.6
        Circle fill opacity

    Returns
    -------
    Circle names and HTML in obj.template_vars

    Example
    -------
    >>>map.circle_marker(location=[45.5, -122.3],
                         radius=1000, popup='Portland, OR')
    >>>map.circle_marker(location=[45.5, -122.3],
                         radius=1000, popup=(bar_chart, 'bar_data.json'))
    """
    count = self.mark_cnt['circle']
    varname = 'circle_' + str(count)

    circle_js = self.env.get_template('circle_marker.js').render(
        {'circle': varname,
         'radius': radius,
         'lat': location[0], 'lon': location[1],
         'line_color': line_color,
         'fill_color': fill_color,
         'fill_opacity': fill_opacity})
    popup_js = self._popup_render(popup=popup, mk_name='circle_',
                                  count=count, width=popup_width)
    add_js = 'map.addLayer(circle_{0})'.format(count)

    self.template_vars.setdefault('markers', []).append(
        (circle_js, popup_js, add_js))
@iter_obj('polygon')
def polygon_marker(self, location=None, line_color='black', line_opacity=1,
                   line_weight=2, fill_color='blue', fill_opacity=1,
                   num_sides=4, rotation=0, radius=15, popup=None,
                   popup_width=300):
    """Custom markers using the Leaflet Data Vis Framework.

    Parameters
    ----------
    location: tuple or list, default None
        Latitude and Longitude of Marker (Northing, Easting)
    line_color: string, default 'black'
        Marker line color
    line_opacity: float, default 1
        Line opacity, scale 0-1
    line_weight: int, default 2
        Stroke weight in pixels
    fill_color: string, default 'blue'
        Marker fill color
    fill_opacity: float, default 1
        Marker fill opacity
    num_sides: int, default 4
        Number of polygon sides
    rotation: int, default 0
        Rotation angle in degrees
    radius: int, default 15
        Marker radius, in pixels
    popup: string or tuple, default 'Pop Text'
        Input text or visualization for object. Can pass either text,
        or a tuple of the form (Vincent object, 'vis_path.json')
        It is possible to adjust the width of text/HTML popups
        using the optional keywords `popup_width` (default is 300px).

    Returns
    -------
    Polygon marker names and HTML in obj.template_vars
    """
    count = self.mark_cnt['polygon']
    varname = 'polygon_' + str(count)

    polygon_js = self.env.get_template('poly_marker.js').render(
        {'marker': varname,
         'lat': location[0],
         'lon': location[1],
         'line_color': line_color,
         'line_opacity': line_opacity,
         'line_weight': line_weight,
         'fill_color': fill_color,
         'fill_opacity': fill_opacity,
         'num_sides': num_sides,
         'rotation': rotation,
         'radius': radius})
    popup_js = self._popup_render(popup=popup, mk_name='polygon_',
                                  count=count, width=popup_width)
    add_js = 'map.addLayer(polygon_{0})'.format(count)

    self.template_vars.setdefault('markers', []).append(
        (polygon_js, popup_js, add_js))

    # Register the DVF JS reference and the plugin payload so the
    # rendered page can load the Leaflet-DVF marker library.
    self.template_vars.update(
        {'dvf_js': self.env.get_template('dvf_js_ref.txt').render()})
    dvf_payload = resource_string('folium',
                                  'plugins/leaflet-dvf.markers.min.js')
    self.plugins.update({'leaflet-dvf.markers.min.js': dvf_payload})
def lat_lng_popover(self):
    """Enable popovers to display Lat and Lon on each click."""
    rendered = self.env.get_template('lat_lng_popover.js').render()
    self.template_vars.update({'lat_lng_pop': rendered})
def click_for_marker(self, popup=None):
    """Enable the addition of markers via clicking on the map. The marker
    popup defaults to Lat/Lon, but custom text can be passed via the
    popup parameter. Double click markers to remove them.

    Parameters
    ----------
    popup:
        Custom popup text

    Example
    -------
    >>>map.click_for_marker(popup='Your Custom Text')
    """
    # Quote a custom popup; otherwise fall back to a Lat/Lon JS snippet.
    if popup:
        popup_txt = '"' + popup + '"'
    else:
        popup_txt = '"Latitude: " + lat + "<br>Longitude: " + lng '
    rendered = self.env.get_template('click_for_marker.js').render(
        {'popup': popup_txt})
    self.template_vars.update({'click_pop': rendered})
def fit_bounds(self, bounds, padding_top_left=None,
               padding_bottom_right=None, padding=None, max_zoom=None):
    """Fit the map to contain a bounding box with the maximum zoom level possible.

    Parameters
    ----------
    bounds: list of (latitude, longitude) points
        Bounding box specified as two points [southwest, northeast]
    padding_top_left: (x, y) point, default None
        Padding in the top left corner. Useful if some elements in
        the corner, such as controls, might obscure objects you're zooming
        to.
    padding_bottom_right: (x, y) point, default None
        Padding in the bottom right corner.
    padding: (x, y) point, default None
        Equivalent to setting both top left and bottom right padding to
        the same value.
    max_zoom: int, default None
        Maximum zoom to be used.

    Example
    -------
    >>> map.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])
    """
    # Keep only the options the caller actually supplied (truthy values).
    candidates = {
        'paddingTopLeft': padding_top_left,
        'paddingBottomRight': padding_bottom_right,
        'padding': padding,
        'maxZoom': max_zoom,
    }
    fit_bounds_options = {name: value
                          for name, value in candidates.items() if value}
    rendered = self.env.get_template('fit_bounds.js').render({
        'bounds': json.dumps(bounds),
        'fit_bounds_options': json.dumps(fit_bounds_options,
                                         sort_keys=True),
    })
    self.template_vars.update({'fit_bounds': rendered})
def add_plugin(self, plugin):
    """Adds a plugin to the map.

    Parameters
    ----------
    plugin: folium.plugins object
        A plugin to be added to the map. It has to implement the
        methods `render_html`, `render_css` and `render_js`.
    """
    # The plugin registers itself against this map instance.
    plugin.add_to_map(self)
def _auto_bounds(self):
    """Fit the rendered map view around every plotted feature.

    No-op when the user already called fit_bounds() (an explicit
    'fit_bounds' entry exists in template_vars). Otherwise builds a JS
    array of all feature variable names (line_1, circle_2, ...) and
    renders 'fit_bounds.js' in autobounds mode.
    """
    if 'fit_bounds' in self.template_vars:
        return
    # Get count for each feature type
    ft_names = ["marker", "line", "circle", "polygon", "multiline"]
    ft_names = [i for i in ft_names if i in self.mark_cnt]

    # Make a comprehensive list of all the features we want to fit.
    # Each type contributes one entry per instance: name_1 .. name_count.
    # (Original formatted every entry with count=self.mark_cnt[ft_name],
    # ignoring the loop variable, which produced e.g. [line_3, line_3,
    # line_3] instead of [line_1, line_2, line_3].)
    feat_str = ["{name}_{count}".format(name=ft_name, count=count)
                for ft_name in ft_names
                for count in range(1, self.mark_cnt[ft_name] + 1)]
    feat_str = "[" + ', '.join(feat_str) + "]"

    fit_bounds = self.env.get_template('fit_bounds.js')
    fit_bounds_str = fit_bounds.render({
        'autobounds': not self.location,
        'features': feat_str,
        'fit_bounds_options': json.dumps({'padding': [30, 30]}),
    })
    self.template_vars.update({'fit_bounds': fit_bounds_str.strip()})
def _popup_render(self, popup=None, mk_name=None, count=None,
                  width=300):
    """Popup renderer: either text or Vincent/Vega.

    Parameters
    ----------
    popup: str or Vincent tuple, default None
        String for text popup, or tuple of (Vincent object, json_path)
    mk_name: str, default None
        Type of marker. Simple, Circle, etc.
    count: int, default None
        Count of marker
    width: int, default 300
        Popup width in pixels (text popups only).

    Returns
    -------
    str
        Rendered popup JS, or '' when no popup was requested.

    Raises
    ------
    TypeError
        If popup is neither a string/bytes nor a Vincent tuple.
    """
    if not popup:
        return ''
    else:
        # Pick the text/bytes types for the running interpreter; the
        # `unicode` name only evaluates on Python 2, so no NameError
        # occurs on Python 3.
        if sys.version_info >= (3, 0):
            utype, stype = str, bytes
        else:
            utype, stype = unicode, str
        if isinstance(popup, (utype, stype)):
            # Plain-text popup: escape non-ASCII to XML char refs and
            # JSON-encode so the text embeds safely into JS.
            popup_temp = self.env.get_template('simple_popup.js')
            if isinstance(popup, utype):
                popup_txt = popup.encode('ascii', 'xmlcharrefreplace')
            else:
                popup_txt = popup
            if sys.version_info >= (3, 0):
                popup_txt = popup_txt.decode()
            pop_txt = json.dumps(str(popup_txt))
            return popup_temp.render({'pop_name': mk_name + str(count),
                                      'pop_txt': pop_txt, 'width': width})
        elif isinstance(popup, tuple):
            # Vincent/Vega popup: (vega_object, 'path.json').
            # Update template with JS libs.
            vega_temp = self.env.get_template('vega_ref.txt').render()
            jquery_temp = self.env.get_template('jquery_ref.txt').render()
            d3_temp = self.env.get_template('d3_ref.txt').render()
            vega_parse = self.env.get_template('vega_parse.js').render()
            self.template_vars.update({'vega': vega_temp,
                                       'd3': d3_temp,
                                       'jquery': jquery_temp,
                                       'vega_parse': vega_parse})

            # Parameters for Vega template.
            vega = popup[0]
            mark = ''.join([mk_name, str(count)])
            json_out = popup[1]
            # Div id is the json filename without its extension.
            div_id = popup[1].split('.')[0]
            width = vega.width
            height = vega.height
            # Grow the popup to accommodate the chart padding (explicit
            # padding dict, or a fixed allowance otherwise).
            if isinstance(vega.padding, dict):
                width += vega.padding['left']+vega.padding['right']
                height += vega.padding['top']+vega.padding['bottom']
            else:
                width += 75
                height += 50
            max_width = max([self.map_size['width'], width])
            vega_id = '#' + div_id
            popup_temp = self.env.get_template('vega_marker.js')
            return popup_temp.render({'mark': mark, 'div_id': div_id,
                                      'width': width, 'height': height,
                                      'max_width': max_width,
                                      'json_out': json_out,
                                      'vega_id': vega_id})
        else:
            raise TypeError("Unrecognized popup type: {!r}".format(popup))
@iter_obj('geojson')
def geo_json(self, geo_path=None, geo_str=None, data_out='data.json',
             data=None, columns=None, key_on=None, threshold_scale=None,
             fill_color='blue', fill_opacity=0.6, line_color='black',
             line_weight=1, line_opacity=1, legend_name=None,
             topojson=None, reset=False):
    """Apply a GeoJSON overlay to the map.

    Plot a GeoJSON overlay on the base map. There is no requirement
    to bind data (passing just a GeoJSON plots a single-color overlay),
    but there is a data binding option to map your columnar data to
    different feature objects with a color scale.

    If data is passed as a Pandas dataframe, the "columns" and "key-on"
    keywords must be included, the first to indicate which DataFrame
    columns to use, the second to indicate the layer in the GeoJSON
    on which to key the data. The 'columns' keyword does not need to be
    passed for a Pandas series.

    Colors are generated from color brewer (http://colorbrewer2.org/)
    sequential palettes on a D3 threshold scale. The scale defaults to the
    following quantiles: [0, 0.5, 0.75, 0.85, 0.9]. A custom scale can be
    passed to `threshold_scale` of length <=6, in order to match the
    color brewer range.

    TopoJSONs can be passed as "geo_path", but the "topojson" keyword must
    also be passed with the reference to the topojson objects to convert.
    See the topojson.feature method in the TopoJSON API reference:
    https://github.com/mbostock/topojson/wiki/API-Reference

    Parameters
    ----------
    geo_path: string, default None
        URL or File path to your GeoJSON data
    geo_str: string, default None
        String of GeoJSON, alternative to geo_path
    data_out: string, default 'data.json'
        Path to write Pandas DataFrame/Series to JSON if binding data
    data: Pandas DataFrame or Series, default None
        Data to bind to the GeoJSON.
    columns: dict or tuple, default None
        If the data is a Pandas DataFrame, the columns of data to be bound.
        Must pass column 1 as the key, and column 2 the values.
    key_on: string, default None
        Variable in the GeoJSON file to bind the data to. Must always
        start with 'feature' and be in JavaScript objection notation.
        Ex: 'feature.id' or 'feature.properties.statename'.
    threshold_scale: list, default None
        Data range for D3 threshold scale. Defaults to the following range
        of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
        order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
    fill_color: string, default 'blue'
        Area fill color. Can pass a hex code, color name, or if you are
        binding data, one of the following color brewer palettes:
        'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
        'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
    fill_opacity: float, default 0.6
        Area fill opacity, range 0-1.
    line_color: string, default 'black'
        GeoJSON geopath line color.
    line_weight: int, default 1
        GeoJSON geopath line weight.
    line_opacity: float, default 1
        GeoJSON geopath line opacity, range 0-1.
    legend_name: string, default None
        Title for data legend. If not passed, defaults to columns[1].
    topojson: string, default None
        If using a TopoJSON, passing "objects.yourfeature" to the topojson
        keyword argument will enable conversion to GeoJSON.
    reset: boolean, default False
        Remove all current geoJSON layers, start with new layer

    Output
    ------
    GeoJSON data layer in obj.template_vars

    Example
    -------
    >>> m.geo_json(geo_path='us-states.json', line_color='blue',
                   line_weight=3)
    >>> m.geo_json(geo_path='geo.json', data=df,
                   columns=['Data 1', 'Data 2'],
                   key_on='feature.properties.myvalue', fill_color='PuBu',
                   threshold_scale=[0, 20, 30, 40, 50, 60])
    >>> m.geo_json(geo_path='countries.json', topojson='objects.countries')
    """
    # Wipe all previously accumulated geojson template state and restart
    # the layer counter at 1 (the @iter_obj decorator already bumped it).
    if reset:
        reset_vars = ['json_paths', 'func_vars', 'color_scales',
                      'geo_styles', 'gjson_layers', 'map_legends',
                      'topo_convert']
        for var in reset_vars:
            self.template_vars.update({var: []})
        self.mark_cnt['geojson'] = 1

    def json_style(style_cnt, line_color, line_weight, line_opacity,
                   fill_color, fill_opacity, quant_fill):
        """Generate JSON styling function from template"""
        style_temp = self.env.get_template('geojson_style.js')
        style = style_temp.render({'style': style_cnt,
                                   'line_color': line_color,
                                   'line_weight': line_weight,
                                   'line_opacity': line_opacity,
                                   'fill_color': fill_color,
                                   'fill_opacity': fill_opacity,
                                   'quantize_fill': quant_fill})
        return style

    # Set map type to geojson.
    self.map_type = 'geojson'

    # Get JSON map layer template pieces, convert TopoJSON if necessary.
    # geo_str is really a hack.
    if geo_path:
        geo_path = ".defer(d3.json, '{0}')".format(geo_path)
    elif geo_str:
        fmt = (".defer(function(callback)"
               "{{callback(null, JSON.parse('{}'))}})").format
        geo_path = fmt(geo_str)
    if topojson is None:
        map_var = '_'.join(['gjson', str(self.mark_cnt['geojson'])])
        layer_var = map_var
    else:
        # TopoJSON input: emit a converter snippet and reference the
        # converted variable for the layer.
        map_var = '_'.join(['tjson', str(self.mark_cnt['geojson'])])
        topo_obj = '.'.join([map_var, topojson])
        layer_var = '_'.join(['topo', str(self.mark_cnt['geojson'])])
        topo_templ = self.env.get_template('topo_func.js')
        topo_func = topo_templ.render({'map_var': layer_var,
                                       't_var': map_var,
                                       't_var_obj': topo_obj})
        topo_lib = self.env.get_template('topojson_ref.txt').render()
        self.template_vars.update({'topojson': topo_lib})
        self.template_vars.setdefault('topo_convert',
                                      []).append(topo_func)

    style_count = '_'.join(['style', str(self.mark_cnt['geojson'])])

    # Get Data binding pieces if available.
    if data is not None:
        import pandas as pd

        # Create DataFrame with only the relevant columns.
        if isinstance(data, pd.DataFrame):
            data = pd.concat([data[columns[0]], data[columns[1]]], axis=1)

        # Save data to JSON.
        self.json_data[data_out] = utilities.transform_data(data)

        # Add data to queue.
        d_path = ".defer(d3.json, '{0}')".format(data_out)
        self.template_vars.setdefault('json_paths', []).append(d_path)

        # Add data variable to makeMap function.
        data_var = '_'.join(['data', str(self.mark_cnt['geojson'])])
        self.template_vars.setdefault('func_vars', []).append(data_var)

        # D3 Color scale.
        series = data[columns[1]]
        if threshold_scale and len(threshold_scale) > 6:
            raise ValueError
        domain = threshold_scale or utilities.split_six(series=series)
        if len(domain) > 253:
            raise ValueError('The threshold scale must be length <= 253')
        if not utilities.color_brewer(fill_color):
            raise ValueError('Please pass a valid color brewer code to '
                             'fill_local. See docstring for valid codes.')

        palette = utilities.color_brewer(fill_color, len(domain))
        d3range = palette[0: len(domain) + 1]
        tick_labels = utilities.legend_scaler(domain)

        color_temp = self.env.get_template('d3_threshold.js')
        d3scale = color_temp.render({'domain': domain,
                                     'range': d3range})
        self.template_vars.setdefault('color_scales', []).append(d3scale)

        # Create legend.
        name = legend_name or columns[1]
        leg_templ = self.env.get_template('d3_map_legend.js')
        legend = leg_templ.render({'lin_max': int(domain[-1]*1.1),
                                   'tick_labels': tick_labels,
                                   'caption': name})
        self.template_vars.setdefault('map_legends', []).append(legend)

        # Style with color brewer colors.
        matchColor = 'color(matchKey({0}, {1}))'.format(key_on, data_var)
        style = json_style(style_count, line_color, line_weight,
                           line_opacity, None, fill_opacity, matchColor)
    else:
        # No bound data: single-color style.
        style = json_style(style_count, line_color, line_weight,
                           line_opacity, fill_color, fill_opacity, None)

    layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
             'onEachFeature: onEachFeature}}).addTo(map)'
             .format(self.mark_cnt['geojson'], layer_var, style_count))

    self.template_vars.setdefault('json_paths', []).append(geo_path)
    self.template_vars.setdefault('func_vars', []).append(map_var)
    self.template_vars.setdefault('geo_styles', []).append(style)
    self.template_vars.setdefault('gjson_layers', []).append(layer)
@iter_obj('image_overlay')
def image_overlay(self, data, opacity=0.25, min_lat=-90.0, max_lat=90.0,
                  min_lon=-180.0, max_lon=180.0, image_name=None,
                  filename=None):
    """
    Simple image overlay of raster data from a numpy array. This is a
    lightweight way to overlay geospatial data on top of a map. If your
    data is high res, consider implementing a WMS server and adding a WMS
    layer.

    This function works by generating a PNG file from a numpy array. If
    you do not specify a filename, it will embed the image inline.
    Otherwise, it saves the file in the current directory, and then adds
    it as an image overlay layer in leaflet.js. By default, the image is
    placed and stretched using bounds that cover the entire globe.

    Parameters
    ----------
    data: numpy array OR url string, required.
        if numpy array, must be a image format,
        i.e., NxM (mono), NxMx3 (rgb), or NxMx4 (rgba)
        if url, must be a valid url to a image (local or external)
    opacity: float, default 0.25
        Image layer opacity in range 0 (transparent) to 1 (opaque)
    min_lat: float, default -90.0
    max_lat: float, default 90.0
    min_lon: float, default -180.0
    max_lon: float, default 180.0
    image_name: string, default None
        The name of the layer object in leaflet.js
    filename: string, default None
        Optional file name of output.png for image overlay.
        Use `None` for inline PNG.

    Output
    ------
    Image overlay data layer in obj.template_vars

    Examples
    -------
    # assumes a map object `m` has been created
    >>> import numpy as np
    >>> data = np.random.random((100,100))
    # to make a rgba from a specific matplotlib colormap:
    >>> import matplotlib.cm as cm
    >>> cmapper = cm.cm.ColorMapper('jet')
    >>> data2 = cmapper.to_rgba(np.random.random((100,100)))
    >>> # Place the data over all of the globe (will be pretty pixelated!)
    >>> m.image_overlay(data)
    >>> # Put it only over a single city (Paris).
    >>> m.image_overlay(data, min_lat=48.80418, max_lat=48.90970,
    ...                 min_lon=2.25214, max_lon=2.44731)
    """
    if isinstance(data, str):
        # A string is taken as a ready-made image URL/path.
        filename = data
    else:
        # Encode the array as PNG bytes.
        try:
            png_str = utilities.write_png(data)
        except Exception as e:
            raise e
        if filename is not None:
            # Write the PNG next to the map HTML and reference it by path.
            with open(filename, 'wb') as fd:
                fd.write(png_str)
        else:
            # No filename: embed the PNG inline as a base64 data URI.
            png = "data:image/png;base64,{}".format
            filename = png(base64.b64encode(png_str).decode('utf-8'))

    if image_name not in self.added_layers:
        if image_name is None:
            image_name = "Image_Overlay"
        else:
            image_name = image_name.replace(" ", "_")
        image_url = filename
        # Leaflet bounds: [[south, west], [north, east]].
        image_bounds = [[min_lat, min_lon], [max_lat, max_lon]]
        image_opacity = opacity

        image_temp = self.env.get_template('image_layer.js')

        image = image_temp.render({'image_name': image_name,
                                   'image_url': image_url,
                                   'image_bounds': image_bounds,
                                   'image_opacity': image_opacity})

        self.template_vars['image_layers'].append(image)
        # NOTE(review): appends a bare string here, while tile/wms layers
        # append {name: url} dicts — add_layers_to_map assumes dicts.
        self.added_layers.append(image_name)
def _build_map(self, html_templ=None, templ_type='string'):
self._auto_bounds()
"""Build HTML/JS/CSS from Templates given current map type."""
if html_templ is None:
map_types = {'base': 'fol_template.html',
'geojson': 'geojson_template.html'}
# Check current map type.
type_temp = map_types[self.map_type]
html_templ = self.env.get_template(type_temp)
else:
if templ_type == 'string':
html_templ = self.env.from_string(html_templ)
self.HTML = html_templ.render(self.template_vars, plugins=self.plugins)
def create_map(self, path='map.html', plugin_data_out=True, template=None):
"""Write Map output to HTML and data output to JSON if available.
Parameters:
-----------
path: string, default 'map.html'
Path for HTML output for map
plugin_data_out: boolean, default True
If using plugins such as awesome markers, write all plugin
data such as JS/CSS/images to path
template: string, default None
Custom template to render
"""
self.map_path = path
self._build_map(template)
with codecs.open(path, 'w', 'utf8') as f:
f.write(self.HTML)
if self.json_data:
for path, data in iteritems(self.json_data):
with open(path, 'w') as g:
json.dump(data, g)
if self.plugins and plugin_data_out:
for name, plugin in iteritems(self.plugins):
with open(name, 'w') as f:
if isinstance(plugin, binary_type):
plugin = text_type(plugin, 'utf8')
f.write(plugin)
def _repr_html_(self):
"""Build the HTML representation for IPython."""
map_types = {'base': 'ipynb_repr.html',
'geojson': 'ipynb_iframe.html'}
# Check current map type.
type_temp = map_types[self.map_type]
if self.render_iframe:
type_temp = 'ipynb_iframe.html'
templ = self.env.get_template(type_temp)
self._build_map(html_templ=templ, templ_type='temp')
if self.map_type == 'geojson' or self.render_iframe:
if not self.map_path:
raise ValueError('Use create_map to set the path!')
return templ.render(path=self.map_path, width=self.width,
height=self.height)
return self.HTML
def display(self):
"""Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
"""
from IPython.core.display import display, HTML
display(HTML(self._repr_html_()))
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/backends/backend_ps.py | 2 | 60197 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
# PY3KTODO: Get rid of "print >>fh" syntax
from __future__ import division, print_function
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
import io
if sys.version_info[0] < 3:
import cStringIO
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
    """Lazily computed, cached facts about the local ghostscript install."""

    def __init__(self):
        # Cache of computed properties (executable name, version, ...).
        self._cached = {}

    @property
    def gs_exe(self):
        """
        executable name of ghostscript.
        """
        try:
            return self._cached["gs_exe"]
        except KeyError:
            pass

        if sys.platform == 'win32':
            gs_exe = 'gswin32c'
        else:
            gs_exe = 'gs'

        self._cached["gs_exe"] = gs_exe
        return gs_exe

    @property
    def gs_version(self):
        """
        version of ghostscript, as a tuple of ints.
        """
        try:
            return self._cached["gs_version"]
        except KeyError:
            pass

        from subprocess import Popen, PIPE
        # BUG FIX: read the output via communicate() so the child
        # process is waited on and the pipe is closed; the previous
        # code read from proc.stdout directly, leaking the descriptor
        # and potentially leaving a zombie process.
        proc = Popen(self.gs_exe + " --version",
                     shell=True, stdout=PIPE)
        out, _ = proc.communicate()
        if sys.version_info[0] >= 3:
            ver = out.decode('ascii')
        else:
            ver = out
        gs_version = tuple(map(int, ver.strip().split(".")))

        self._cached["gs_version"] = gs_version
        return gs_version

    @property
    def supports_ps2write(self):
        """
        True if the installed ghostscript supports ps2write device.
        """
        return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
    """Format a number compactly for PostScript output.

    Strings pass through unchanged; integral values lose their decimal
    point; other values are rendered with at most three decimals,
    stripped of trailing zeros.
    """
    if is_string_like(val):
        return val

    ival = int(val)
    if ival == val:
        return str(ival)

    return ("%1.3f" % val).rstrip("0").rstrip(".")
def _nums_to_str(*args):
    """Join the arguments into one space-separated PostScript number list."""
    return ' '.join(_num_to_str(value) for value in args)
def quote_ps_string(s):
    "Quote dangerous characters of S for use in a PostScript string constant."
    # Backslash must be escaped first so later escapes are not doubled.
    replacements = (("\\", "\\\\"),
                    ("(", "\\("),
                    (")", "\\)"),
                    ("'", "\\251"),
                    ("`", "\\301"))
    for old, new in replacements:
        s = s.replace(old, new)
    # Any remaining character outside printable ASCII (newline excepted)
    # becomes a three-digit octal escape.
    return re.sub(r"[^ -~\n]", lambda x: r"\%03o" % ord(x.group()), s)
def seq_allequal(seq1, seq2):
    """
    seq1 and seq2 are either None or sequences or arrays
    Return True if both are None or both are seqs with identical
    elements
    """
    if seq1 is None or seq2 is None:
        # Equal only when *both* are None.
        return seq1 is None and seq2 is None

    # Neither is None; assume both are iterable with a length.
    if len(seq1) != len(seq2):
        return False

    return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
    """
    The renderer handles all the drawing primitives using a graphics
    context instance that controls the colors/styles.

    Drawing is accumulated as PostScript text in ``self._pswriter``;
    the canvas later wraps it with document headers and a prolog.
    """
    # Bounded caches of font objects, shared across renderer instances.
    fontd = maxdict(50)
    afmfontd = maxdict(50)

    def __init__(self, width, height, pswriter, imagedpi=72):
        """
        Although postscript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high
        res images and then scale them before embedding them.
        """
        RendererBase.__init__(self)
        self.width = width
        self.height = height
        self._pswriter = pswriter
        if rcParams['text.usetex']:
            # psfrag bookkeeping is only needed in usetex mode.
            self.textcnt = 0
            self.psfrag = []
        self.imagedpi = imagedpi

        # current renderer state (None=uninitialised)
        self.color = None
        self.linewidth = None
        self.linejoin = None
        self.linecap = None
        self.linedash = None
        self.fontname = None
        self.fontsize = None
        self._hatches = {}
        self.image_magnification = imagedpi/72.0
        self._clip_paths = {}
        self._path_collection_id = 0

        # Maps font stat key -> (path, set of codepoints) for subsetting.
        self.used_characters = {}
        self.mathtext_parser = MathTextParser("PS")

        self._afm_font_dir = os.path.join(
            rcParams['datapath'], 'fonts', 'afm')

    def track_characters(self, font, s):
        """Keeps track of which characters are required from
        each font."""
        realpath, stat_key = get_realpath_and_stat(font.fname)
        used_characters = self.used_characters.setdefault(
            stat_key, (realpath, set()))
        used_characters[1].update([ord(x) for x in s])

    def merge_used_characters(self, other):
        # Fold another used-characters mapping into our own.
        # NOTE(review): dict.iteritems() is Python-2-only.
        for stat_key, (realpath, charset) in other.iteritems():
            used_characters = self.used_characters.setdefault(
                stat_key, (realpath, set()))
            used_characters[1].update(charset)

    def set_color(self, r, g, b, store=1):
        # Emit a color change only when it differs from the current state;
        # store=0 lets callers set a temporary color without caching it.
        if (r,g,b) != self.color:
            if r==g and r==b:
                self._pswriter.write("%1.3f setgray\n"%r)
            else:
                self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
            if store: self.color = (r,g,b)

    def set_linewidth(self, linewidth, store=1):
        # Emit setlinewidth only on change (see set_color for store).
        if linewidth != self.linewidth:
            self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
            if store: self.linewidth = linewidth

    def set_linejoin(self, linejoin, store=1):
        # linejoin is already the PostScript integer code (0/1/2).
        if linejoin != self.linejoin:
            self._pswriter.write("%d setlinejoin\n"%linejoin)
            if store: self.linejoin = linejoin

    def set_linecap(self, linecap, store=1):
        # linecap is already the PostScript integer code (0/1/2).
        if linecap != self.linecap:
            self._pswriter.write("%d setlinecap\n"%linecap)
            if store: self.linecap = linecap

    def set_linedash(self, offset, seq, store=1):
        # Skip the write when the dash sequence is unchanged; an empty or
        # None seq resets to a solid line.
        if self.linedash is not None:
            oldo, oldseq = self.linedash
            if seq_allequal(seq, oldseq): return

        if seq is not None and len(seq):
            s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
            self._pswriter.write(s)
        else:
            self._pswriter.write("[] 0 setdash\n")
        if store: self.linedash = (offset,seq)

    def set_font(self, fontname, fontsize, store=1):
        # In afm mode fonts are selected inline in draw_text instead.
        if rcParams['ps.useafm']: return
        if (fontname,fontsize) != (self.fontname,self.fontsize):
            out = ("/%s findfont\n"
                   "%1.3f scalefont\n"
                   "setfont\n" % (fontname,fontsize))
            self._pswriter.write(out)
            if store: self.fontname = fontname
            if store: self.fontsize = fontsize

    def create_hatch(self, hatch):
        """Define (once) a PostScript tiling pattern for ``hatch`` and
        return its name; subsequent calls reuse the cached definition."""
        sidelen = 72
        if hatch in self._hatches:
            return self._hatches[hatch]
        name = 'H%d' % len(self._hatches)
        self._pswriter.write("""\
  << /PatternType 1
     /PaintType 2
     /TilingType 2
     /BBox[0 0 %(sidelen)d %(sidelen)d]
     /XStep %(sidelen)d
     /YStep %(sidelen)d

     /PaintProc {
        pop
        0 setlinewidth
""" % locals())
        self._pswriter.write(
            self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
                               simplify=False))
        self._pswriter.write("""\
          stroke
     } bind
   >>
   matrix
   makepattern
   /%(name)s exch def
""" % locals())
        self._hatches[hatch] = name
        return name

    def get_canvas_width_height(self):
        'return the canvas width and height in display coords'
        return self.width, self.height

    def get_text_width_height_descent(self, s, prop, ismath):
        """
        get the width and height in display coords of the string s
        with FontPropertry prop
        """
        if rcParams['text.usetex']:
            texmanager = self.get_texmanager()
            fontsize = prop.get_size_in_points()
            w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
                                                               renderer=self)
            return w, h, d

        if ismath:
            width, height, descent, pswriter, used_characters = \
                self.mathtext_parser.parse(s, 72, prop)
            return width, height, descent

        if rcParams['ps.useafm']:
            # NOTE(review): ismath is always falsy here (handled above),
            # so this strip of '$' delimiters is dead code.
            if ismath: s = s[1:-1]
            font = self._get_font_afm(prop)
            l,b,w,h,d = font.get_str_bbox_and_descent(s)

            # AFM metrics are in 1/1000 of the point size.
            fontsize = prop.get_size_in_points()
            scale = 0.001*fontsize
            w *= scale
            h *= scale
            d *= scale
            return w, h, d

        font = self._get_font_ttf(prop)
        font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
        w, h = font.get_width_height()
        w /= 64.0  # convert from subpixels
        h /= 64.0
        d = font.get_descent()
        d /= 64.0
        #print s, w, h
        return w, h, d

    def flipy(self):
        'return true if small y numbers are top for renderer'
        return False

    def _get_font_afm(self, prop):
        # Cache AFM fonts both by property hash and by filename.
        key = hash(prop)
        font = self.afmfontd.get(key)
        if font is None:
            fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
            if fname is None:
                # Fall back to Helvetica when no AFM match exists.
                fname = findfont(
                    "Helvetica", fontext='afm', directory=self._afm_font_dir)
            font = self.afmfontd.get(fname)
            if font is None:
                with open(fname, 'rb') as fh:
                    font = AFM(fh)
                self.afmfontd[fname] = font
            self.afmfontd[key] = font
        return font

    def _get_font_ttf(self, prop):
        # Cache TrueType fonts both by property hash and by filename.
        key = hash(prop)
        font = self.fontd.get(key)
        if font is None:
            fname = findfont(prop)
            font = self.fontd.get(fname)
            if font is None:
                font = FT2Font(str(fname))
                self.fontd[fname] = font
            self.fontd[key] = font
        font.clear()
        size = prop.get_size_in_points()
        font.set_size(size, 72.0)
        return font

    def _rgba(self, im):
        # Raw (h, w, rgba-bytes) tuple from the image.
        return im.as_rgba_str()

    def _rgb(self, im):
        # Drop the alpha channel: return (h, w, rgb-bytes).
        h,w,s = im.as_rgba_str()

        rgba = np.fromstring(s, np.uint8)
        rgba.shape = (h, w, 4)
        rgb = rgba[:,:,:3]
        return h, w, rgb.tostring()

    def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
        # Luminance conversion with standard NTSC-style weights.
        rgbat = im.as_rgba_str()
        rgba = np.fromstring(rgbat[2], np.uint8)
        rgba.shape = (rgbat[0], rgbat[1], 4)
        rgba_f = rgba.astype(np.float32)
        r = rgba_f[:,:,0]
        g = rgba_f[:,:,1]
        b = rgba_f[:,:,2]
        gray = (r*rc + g*gc + b*bc).astype(np.uint8)
        return rgbat[0], rgbat[1], gray.tostring()

    def _hex_lines(self, s, chars_per_line=128):
        # Hex-encode bytes and split into fixed-width lines for embedding
        # in the PostScript stream.
        s = binascii.b2a_hex(s)
        nhex = len(s)
        lines = []
        for i in range(0,nhex,chars_per_line):
            limit = min(i+chars_per_line, nhex)
            lines.append(s[i:limit])
        return lines

    def get_image_magnification(self):
        """
        Get the factor by which to magnify images passed to draw_image.
        Allows a backend to have images at a different resolution to other
        artists.
        """
        return self.image_magnification

    def option_scale_image(self):
        """
        ps backend support arbitrary scaling of image.
        """
        return True

    def _get_image_h_w_bits_command(self, im):
        # Choose grayscale vs color encoding and the matching PS operator.
        if im.is_grayscale:
            h, w, bits = self._gray(im)
            imagecmd = "image"
        else:
            h, w, bits = self._rgb(im)
            imagecmd = "false 3 colorimage"

        return h, w, bits, imagecmd

    def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
        """
        Draw the Image instance into the current axes; x is the
        distance in pixels from the left hand side of the canvas and y
        is the distance from bottom

        dx, dy is the width and height of the image.  If a transform
        (which must be an affine transform) is given, x, y, dx, dy are
        interpreted as the coordinate of the transform.
        """
        # PostScript y axis grows upward; flip now and unflip at the end.
        im.flipud_out()

        h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
        hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')

        if dx is None:
            xscale = w / self.image_magnification
        else:
            xscale = dx

        if dy is None:
            yscale = h/self.image_magnification
        else:
            yscale = dy

        if transform is None:
            matrix = "1 0 0 1 0 0"
        else:
            matrix = " ".join(map(str, transform.to_values()))

        figh = self.height*72
        #print 'values', origin, flipud, figh, h, y

        # Apply any rectangle and/or path clipping before painting.
        bbox = gc.get_clip_rectangle()
        clippath, clippath_trans = gc.get_clip_path()

        clip = []
        if bbox is not None:
            clipx,clipy,clipw,cliph = bbox.bounds
            clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
        if clippath is not None:
            id = self._get_clip_path(clippath, clippath_trans)
            clip.append('%s' % id)
        clip = '\n'.join(clip)

        #y = figh-(y+h)
        ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
        self._pswriter.write(ps)

        # unflip
        im.flipud_out()

    def _convert_path(self, path, transform, clip=False, simplify=None):
        """Convert a Path to a string of PostScript path operators
        (m/l/c/cl), optionally clipped to the figure rectangle."""
        ps = []
        last_points = None
        if clip:
            clip = (0.0, 0.0, self.width * 72.0,
                    self.height * 72.0)
        else:
            clip = None
        for points, code in path.iter_segments(transform, clip=clip,
                                               simplify=simplify):
            if code == Path.MOVETO:
                ps.append("%g %g m" % tuple(points))
            elif code == Path.CLOSEPOLY:
                ps.append("cl")
            elif last_points is None:
                # The other operations require a previous point
                raise ValueError('Path lacks initial MOVETO')
            elif code == Path.LINETO:
                ps.append("%g %g l" % tuple(points))
            elif code == Path.CURVE3:
                # Promote quadratic beziers to cubic for PostScript.
                points = quad2cubic(*(list(last_points[-2:]) + list(points)))
                ps.append("%g %g %g %g %g %g c" %
                          tuple(points[2:]))
            elif code == Path.CURVE4:
                ps.append("%g %g %g %g %g %g c" % tuple(points))
            last_points = points

        ps = "\n".join(ps)
        return ps

    def _get_clip_path(self, clippath, clippath_transform):
        # Emit each distinct clip path once as a named procedure and
        # return that name for reuse.
        id = self._clip_paths.get((clippath, clippath_transform))
        if id is None:
            id = 'c%x' % len(self._clip_paths)
            ps_cmd = ['/%s {' % id]
            ps_cmd.append(self._convert_path(clippath, clippath_transform,
                                             simplify=False))
            ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
            self._pswriter.write('\n'.join(ps_cmd))
            self._clip_paths[(clippath, clippath_transform)] = id
        return id

    def draw_path(self, gc, path, transform, rgbFace=None):
        """
        Draws a Path instance using the given affine transform.
        """
        # Only clip/simplify unfilled, unhatched paths: filling needs the
        # complete outline.
        clip = (rgbFace is None and gc.get_hatch_path() is None)
        simplify = path.should_simplify and clip
        ps = self._convert_path(
            path, transform, clip=clip, simplify=simplify)
        self._draw_ps(ps, gc, rgbFace)

    def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
        """
        Draw the markers defined by path at each of the positions in x
        and y.  path coordinates are points, x and y coords will be
        transformed by the transform
        """
        if debugPS: self._pswriter.write('% draw_markers \n')

        write = self._pswriter.write

        if rgbFace:
            if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
                ps_color = '%1.3f setgray' % rgbFace[0]
            else:
                ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace

        # construct the generic marker command:
        ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
        lw = gc.get_linewidth()
        stroke = lw != 0.0
        if stroke:
            ps_cmd.append('%.1f setlinewidth' % lw)
            jint = gc.get_joinstyle()
            ps_cmd.append('%d setlinejoin' % jint)
            cint = gc.get_capstyle()
            ps_cmd.append('%d setlinecap' % cint)

        ps_cmd.append(self._convert_path(marker_path, marker_trans,
                                         simplify=False))

        if rgbFace:
            if stroke:
                ps_cmd.append('gsave')
            ps_cmd.extend([ps_color, 'fill'])
            if stroke:
                ps_cmd.append('grestore')

        if stroke:
            ps_cmd.append('stroke')
        ps_cmd.extend(['grestore', '} bind def'])

        # Invoke the procedure once per marker position.
        for vertices, code in path.iter_segments(trans, simplify=False):
            if len(vertices):
                x, y = vertices[-2:]
                ps_cmd.append("%g %g o" % (x, y))

        ps = '\n'.join(ps_cmd)
        self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)

    def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                             offsets, offsetTrans, facecolors, edgecolors,
                             linewidths, linestyles, antialiaseds, urls,
                             offset_position):
        write = self._pswriter.write

        # Define each distinct path once as a named procedure...
        path_codes = []
        for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
            master_transform, paths, all_transforms)):
            name = 'p%x_%x' % (self._path_collection_id, i)
            ps_cmd = ['/%s {' % name,
                      'newpath', 'translate']
            ps_cmd.append(self._convert_path(path, transform, simplify=False))
            ps_cmd.extend(['} bind def\n'])
            write('\n'.join(ps_cmd))
            path_codes.append(name)

        # ...then stamp it at each offset with its own graphics state.
        for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
            gc, master_transform, all_transforms, path_codes, offsets,
            offsetTrans, facecolors, edgecolors, linewidths, linestyles,
            antialiaseds, urls, offset_position):
            ps = "%g %g %s" % (xo, yo, path_id)
            self._draw_ps(ps, gc0, rgbFace)

        # Keep procedure names unique across collections.
        self._path_collection_id += 1

    def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
        """
        draw a Text instance (usetex mode: emit a psfrag placeholder
        that LaTeX later replaces with real typeset text)
        """
        w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
        fontsize = prop.get_size_in_points()
        thetext = 'psmarker%d' % self.textcnt
        color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
        fontcmd = {'sans-serif' : r'{\sffamily %s}',
                   'monospace'  : r'{\ttfamily %s}'}.get(
            rcParams['font.family'], r'{\rmfamily %s}')
        s = fontcmd % s
        tex = r'\color[rgb]{%s} %s' % (color, s)

        corr = 0#w/2*(fontsize-10)/10
        if rcParams['text.latex.preview']:
            # use baseline alignment!
            pos = _nums_to_str(x-corr, y+bl)
            self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
        else:
            # stick to the bottom alignment, but this may give incorrect baseline some times.
            pos = _nums_to_str(x-corr, y)
            self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))

        ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
        self._pswriter.write(ps)
        self.textcnt += 1

    def draw_text(self, gc, x, y, s, prop, angle, ismath):
        """
        draw a Text instance
        """
        # local to avoid repeated attribute lookups
        write = self._pswriter.write
        if debugPS:
            write("% text\n")

        if ismath=='TeX':
            return self.tex(gc, x, y, s, prop, angle)

        elif ismath:
            return self.draw_mathtext(gc, x, y, s, prop, angle)

        elif rcParams['ps.useafm']:
            # AFM branch: position each glyph by name with AFM metrics.
            self.set_color(*gc.get_rgb())

            font = self._get_font_afm(prop)
            fontname = font.get_fontname()
            fontsize = prop.get_size_in_points()
            scale = 0.001*fontsize

            thisx = 0
            thisy = font.get_str_bbox_and_descent(s)[4] * scale
            last_name = None
            lines = []
            for c in s:
                name = uni2type1.get(ord(c), 'question')
                try:
                    width = font.get_width_from_char_name(name)
                except KeyError:
                    name = 'question'
                    width = font.get_width_char('?')
                if last_name is not None:
                    kern = font.get_kern_dist_from_name(last_name, name)
                else:
                    kern = 0
                last_name = name
                thisx += kern * scale

                lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))

                thisx += width * scale

            thetext = "\n".join(lines)
            ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
    """ % locals()
            self._pswriter.write(ps)

        else:
            # TrueType branch: glyphshow by glyph name with FT2 metrics.
            font = self._get_font_ttf(prop)
            font.set_text(s, 0, flags=LOAD_NO_HINTING)
            self.track_characters(font, s)

            self.set_color(*gc.get_rgb())
            self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())

            cmap = font.get_charmap()
            lastgind = None
            #print 'text', s
            lines = []
            thisx = 0
            thisy = font.get_descent() / 64.0
            for c in s:
                ccode = ord(c)
                gind = cmap.get(ccode)
                if gind is None:
                    ccode = ord('?')
                    name = '.notdef'
                    gind = 0
                else:
                    name = font.get_glyph_name(gind)

                glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)

                if lastgind is not None:
                    kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
                else:
                    kern = 0
                lastgind = gind
                thisx += kern/64.0

                lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
                thisx += glyph.linearHoriAdvance/65536.0

            thetext = '\n'.join(lines)
            ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
            self._pswriter.write(ps)

    def new_gc(self):
        # Factory for the PS-specific graphics context.
        return GraphicsContextPS()

    def draw_mathtext(self, gc,
        x, y, s, prop, angle):
        """
        Draw the math text using matplotlib.mathtext
        """
        if debugPS:
            self._pswriter.write("% mathtext\n")

        width, height, descent, pswriter, used_characters = \
            self.mathtext_parser.parse(s, 72, prop)
        self.merge_used_characters(used_characters)
        self.set_color(*gc.get_rgb())
        thetext = pswriter.getvalue()
        ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
        self._pswriter.write(ps)

    def draw_gouraud_triangle(self, gc, points, colors, trans):
        # Single triangle: delegate to the batched version.
        self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
                                    colors.reshape((1, 3, 4)), trans)

    def draw_gouraud_triangles(self, gc, points, colors, trans):
        """Emit a ShadingType-4 (free-form Gouraud) shfill for the
        given (N, 3, 2) points and (N, 3, 4) rgba colors."""
        assert len(points) == len(colors)
        assert points.ndim == 3
        assert points.shape[1] == 3
        assert points.shape[2] == 2
        assert colors.ndim == 3
        assert colors.shape[1] == 3
        assert colors.shape[2] == 4

        points = trans.transform(points)

        shape = points.shape
        flat_points = points.reshape((shape[0] * shape[1], 2))
        flat_colors = colors.reshape((shape[0] * shape[1], 4))
        # Pad the Decode range so boundary points quantize safely.
        points_min = np.min(flat_points, axis=0) - (1 << 8)
        points_max = np.max(flat_points, axis=0) + (1 << 8)
        factor = float(0xffffffff) / (points_max - points_min)

        xmin, ymin = points_min
        xmax, ymax = points_max

        # Pack each vertex as flag byte + two 32-bit coords + rgb bytes,
        # the binary layout the DataSource string expects.
        streamarr = np.empty(
            (shape[0] * shape[1],),
            dtype=[('flags', 'u1'),
                   ('points', '>u4', (2,)),
                   ('colors', 'u1', (3,))])
        streamarr['flags'] = 0
        streamarr['points'] = (flat_points - points_min) * factor
        streamarr['colors'] = flat_colors[:, :3] * 255.0
        stream = quote_ps_string(streamarr.tostring())

        self._pswriter.write("""
gsave
<< /ShadingType 4
   /ColorSpace [/DeviceRGB]
   /BitsPerCoordinate 32
   /BitsPerComponent 8
   /BitsPerFlag 8
   /AntiAlias true
   /Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
   /DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())

    def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
        """
        Emit the PostScript sniplet 'ps' with all the attributes from 'gc'
        applied.  'ps' must consist of PostScript commands to construct a path.

        The fill and/or stroke kwargs can be set to False if the
        'ps' string already includes filling and/or stroking, in
        which case _draw_ps is just supplying properties and
        clipping.
        """
        # local variable eliminates all repeated attribute lookups
        write = self._pswriter.write
        if debugPS and command:
            write("% "+command+"\n")
        mightstroke = gc.shouldstroke()
        stroke = stroke and mightstroke
        fill = (fill and rgbFace is not None and
                (len(rgbFace) <= 3 or rgbFace[3] != 0.0))

        if mightstroke:
            self.set_linewidth(gc.get_linewidth())
            jint = gc.get_joinstyle()
            self.set_linejoin(jint)
            cint = gc.get_capstyle()
            self.set_linecap(cint)
            self.set_linedash(*gc.get_dashes())
            self.set_color(*gc.get_rgb()[:3])
        write('gsave\n')

        cliprect = gc.get_clip_rectangle()
        if cliprect:
            x,y,w,h=cliprect.bounds
            write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
        clippath, clippath_trans = gc.get_clip_path()
        if clippath:
            id = self._get_clip_path(clippath, clippath_trans)
            write('%s\n' % id)

        # Jochen, is the strip necessary? - this could be a honking big string
        write(ps.strip())
        write("\n")

        if fill:
            if stroke:
                # Save state: the fill consumes the path we still need
                # for stroking.
                write("gsave\n")
                self.set_color(store=0, *rgbFace[:3])
                write("fill\n")
                write("grestore\n")
            else:
                self.set_color(store=0, *rgbFace[:3])
                write("fill\n")

        hatch = gc.get_hatch()
        if hatch:
            hatch_name = self.create_hatch(hatch)
            write("gsave\n")
            write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
            write("%s setcolor fill grestore\n" % hatch_name)

        if stroke:
            write("stroke\n")

        write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
    """Graphics context that maps cap/join styles to their PostScript
    integer operator codes."""

    def get_capstyle(self):
        """Return the cap style as the PostScript ``setlinecap`` code."""
        style = GraphicsContextBase.get_capstyle(self)
        return {'butt': 0, 'round': 1, 'projecting': 2}[style]

    def get_joinstyle(self):
        """Return the join style as the PostScript ``setlinejoin`` code."""
        style = GraphicsContextBase.get_joinstyle(self)
        return {'miter': 0, 'round': 1, 'bevel': 2}[style]

    def shouldstroke(self):
        """True when stroking would be visible: positive line width and
        a color that is not fully transparent."""
        if not (self.get_linewidth() > 0.0):
            return False
        rgb = self.get_rgb()
        return len(rgb) <= 3 or rgb[3] != 0.0
def new_figure_manager(num, *args, **kwargs):
    """Create a new figure manager instance for the PS backend."""
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    ps_canvas = FigureCanvasPS(figure)
    return FigureManagerPS(ps_canvas, num)
class FigureCanvasPS(FigureCanvasBase):
    # Renderer class used to generate the PostScript stream; kept as a
    # class attribute so subclasses/tests can substitute their own.
    _renderer_class = RendererPS

    def draw(self):
        # Nothing to do for an on-screen draw; all rendering happens at
        # print time in _print_figure / _print_figure_tex.
        pass

    filetypes = {'ps'  : 'Postscript',
                 'eps' : 'Encapsulated Postscript'}

    def get_default_filetype(self):
        'return the default output file type (plain PostScript)'
        return 'ps'
    def print_ps(self, outfile, *args, **kwargs):
        """Write the figure to ``outfile`` as plain PostScript (.ps)."""
        return self._print_ps(outfile, 'ps', *args, **kwargs)
    def print_eps(self, outfile, *args, **kwargs):
        """Write the figure to ``outfile`` as Encapsulated PostScript (.eps)."""
        return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.iterkeys() )) )
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
    def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
                      orientation='portrait', isLandscape=False, papertype=None,
                      **kwargs):
        """
        Render the figure to hardcopy. Set the figure patch face and
        edge colors.  This is useful because some of the GUIs have a
        gray figure face color background and you'll probably want to
        override this on hardcopy

        If outfile is a string, it is interpreted as a file name.
        If the extension matches .ep* write encapsulated postscript,
        otherwise write a stand-alone PostScript file.

        If outfile is a file object, a stand-alone PostScript file is
        written into this file object.
        """
        isEPSF = format == 'eps'
        passed_in_file_object = False
        if is_string_like(outfile):
            title = outfile
        elif is_writable_file_like(outfile):
            title = None
            passed_in_file_object = True
        else:
            raise ValueError("outfile must be a path or a file-like object")

        # find the appropriate papertype
        width, height = self.figure.get_size_inches()
        if papertype == 'auto':
            if isLandscape: papertype = _get_papertype(height, width)
            else: papertype = _get_papertype(width, height)

        if isLandscape: paperHeight, paperWidth = papersize[papertype]
        else: paperWidth, paperHeight = papersize[papertype]

        if rcParams['ps.usedistiller'] and not papertype == 'auto':
            # distillers will improperly clip eps files if the pagesize is
            # too small
            if width>paperWidth or height>paperHeight:
                if isLandscape:
                    papertype = _get_papertype(height, width)
                    paperHeight, paperWidth = papersize[papertype]
                else:
                    papertype = _get_papertype(width, height)
                    paperWidth, paperHeight = papersize[papertype]

        # center the figure on the paper
        xo = 72*0.5*(paperWidth - width)
        yo = 72*0.5*(paperHeight - height)

        l, b, w, h = self.figure.bbox.bounds
        llx = xo
        lly = yo
        urx = llx + w
        ury = lly + h
        rotation = 0
        if isLandscape:
            # Rotate 90 degrees and swap the bounding box accordingly.
            llx, lly, urx, ury = lly, llx, ury, urx
            xo, yo = 72*paperHeight - yo, xo
            rotation = 90
        bbox = (llx, lly, urx, ury)

        # generate PostScript code for the figure and store it in a string
        origfacecolor = self.figure.get_facecolor()
        origedgecolor = self.figure.get_edgecolor()
        self.figure.set_facecolor(facecolor)
        self.figure.set_edgecolor(edgecolor)

        dryrun = kwargs.get("dryrun", False)
        if dryrun:
            class NullWriter(object):
                # Discard all output: a dryrun only measures layout.
                def write(self, *kl, **kwargs):
                    pass

            self._pswriter = NullWriter()
        else:
            if sys.version_info[0] >= 3:
                self._pswriter = io.StringIO()
            else:
                self._pswriter = cStringIO.StringIO()

        # mixed mode rendering
        _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
        ps_renderer = self._renderer_class(width, height, self._pswriter,
                                           imagedpi=dpi)
        renderer = MixedModeRenderer(self.figure,
            width, height, dpi, ps_renderer,
            bbox_inches_restore=_bbox_inches_restore)

        self.figure.draw(renderer)

        if dryrun: # return immediately if dryrun (tightbbox=True)
            return

        self.figure.set_facecolor(origfacecolor)
        self.figure.set_edgecolor(origedgecolor)

        # Write to a temporary file first; it is distilled and/or moved
        # into place below.
        fd, tmpfile = mkstemp()
        with io.open(fd, 'wb') as raw_fh:
            if sys.version_info[0] >= 3:
                fh = io.TextIOWrapper(raw_fh, encoding="ascii")
            else:
                fh = raw_fh

            # write the PostScript headers
            if isEPSF: print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
            else: print("%!PS-Adobe-3.0", file=fh)
            if title: print("%%Title: "+title, file=fh)
            print(("%%Creator: matplotlib version "
                   +__version__+", http://matplotlib.org/"), file=fh)
            print("%%CreationDate: "+time.ctime(time.time()), file=fh)
            print("%%Orientation: " + orientation, file=fh)
            if not isEPSF: print("%%DocumentPaperSizes: "+papertype, file=fh)
            print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
            if not isEPSF: print("%%Pages: 1", file=fh)
            print("%%EndComments", file=fh)

            # Prolog: shared procedure definitions plus embedded fonts.
            Ndict = len(psDefs)
            print("%%BeginProlog", file=fh)
            if not rcParams['ps.useafm']:
                Ndict += len(ps_renderer.used_characters)
            print("/mpldict %d dict def"%Ndict, file=fh)
            print("mpldict begin", file=fh)
            for d in psDefs:
                d=d.strip()
                for l in d.split('\n'):
                    print(l.strip(), file=fh)
            if not rcParams['ps.useafm']:
                # Embed a subset of every font actually used.
                # NOTE(review): dict.itervalues() is Python-2-only.
                for font_filename, chars in ps_renderer.used_characters.itervalues():
                    if len(chars):
                        font = FT2Font(str(font_filename))
                        cmap = font.get_charmap()
                        glyph_ids = []
                        for c in chars:
                            gind = cmap.get(c) or 0
                            glyph_ids.append(gind)

                        fonttype = rcParams['ps.fonttype']

                        # Can not use more than 255 characters from a
                        # single font for Type 3
                        if len(glyph_ids) > 255:
                            fonttype = 42

                        # The ttf to ps (subsetting) support doesn't work for
                        # OpenType fonts that are Postscript inside (like the
                        # STIX fonts).  This will simply turn that off to avoid
                        # errors.
                        if is_opentype_cff_font(font_filename):
                            raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
                        else:
                            fh.flush()
                            convert_ttf_to_ps(font_filename, raw_fh, fonttype, glyph_ids)
            print("end", file=fh)
            print("%%EndProlog", file=fh)

            if not isEPSF: print("%%Page: 1 1", file=fh)
            print("mpldict begin", file=fh)
            #print >>fh, "gsave"
            print("%s translate"%_nums_to_str(xo, yo), file=fh)
            if rotation: print("%d rotate"%rotation, file=fh)
            print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)

            # write the figure
            print(self._pswriter.getvalue(), file=fh)

            # write the trailer
            #print >>fh, "grestore"
            print("end", file=fh)
            print("showpage", file=fh)
            if not isEPSF: print("%%EOF", file=fh)
            fh.flush()

        if rcParams['ps.usedistiller'] == 'ghostscript':
            gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
        elif rcParams['ps.usedistiller'] == 'xpdf':
            xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)

        if passed_in_file_object:
            with open(tmpfile, 'rb') as fh:
                outfile.write(fh.read())
        else:
            # Create/truncate the destination so its permission bits can
            # be read, then move the temp file over it preserving them.
            with open(outfile, 'w') as fh:
                pass
            mode = os.stat(outfile).st_mode
            shutil.move(tmpfile, outfile)
            os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
                      orientation, isLandscape, papertype,
                      **kwargs):
    """
    If text.usetex is True in rc, a temporary pair of tex/eps files
    are created to allow tex to manage the text layout via the PSFrags
    package. These files are processed to yield the final ps or eps file.

    *outfile* may be a filesystem path or an open file object (see the
    is_file handling at the end); *format* selects 'eps' vs 'ps'.
    """
    isEPSF = format == 'eps'
    title = outfile

    # The PSFrag pipeline always renders at 72 dpi; the *dpi* argument is
    # only forwarded as imagedpi for embedded raster images.
    self.figure.dpi = 72  # ignore the dpi kwarg
    width, height = self.figure.get_size_inches()
    xo = 0
    yo = 0

    l, b, w, h = self.figure.bbox.bounds
    llx = xo
    lly = yo
    urx = llx + w
    ury = lly + h
    bbox = (llx, lly, urx, ury)

    # generate PostScript code for the figure and store it in a string
    origfacecolor = self.figure.get_facecolor()
    origedgecolor = self.figure.get_edgecolor()
    self.figure.set_facecolor(facecolor)
    self.figure.set_edgecolor(edgecolor)

    dryrun = kwargs.get("dryrun", False)
    if dryrun:
        # A dry run only needs the draw pass (e.g. to compute a tight
        # bbox); all PostScript output is discarded.
        class NullWriter(object):
            def write(self, *kl, **kwargs):
                pass

        self._pswriter = NullWriter()
    else:
        if sys.version_info[0] >= 3:
            self._pswriter = io.StringIO()
        else:
            self._pswriter = cStringIO.StringIO()

    # mixed mode rendering
    _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
    ps_renderer = self._renderer_class(width, height,
                                       self._pswriter, imagedpi=dpi)
    renderer = MixedModeRenderer(self.figure,
                                 width, height, dpi, ps_renderer,
                                 bbox_inches_restore=_bbox_inches_restore)

    self.figure.draw(renderer)

    if dryrun:  # return immediately if dryrun (tightbbox=True)
        return

    self.figure.set_facecolor(origfacecolor)
    self.figure.set_edgecolor(origedgecolor)

    # write to a temp file, we'll move it to outfile when done
    fd, tmpfile = mkstemp()
    if sys.version_info[0] >= 3:
        fh = io.open(fd, 'w', encoding='ascii')
    else:
        fh = io.open(fd, 'wb')
    with fh:
        # write the Encapsulated PostScript headers
        print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
        if title: print("%%Title: "+title, file=fh)
        print(("%%Creator: matplotlib version "
               +__version__+", http://matplotlib.org/"), file=fh)
        print("%%CreationDate: "+time.ctime(time.time()), file=fh)
        print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
        print("%%EndComments", file=fh)

        # Emit the mpldict prolog (PostScript helper macros, see psDefs).
        Ndict = len(psDefs)
        print("%%BeginProlog", file=fh)
        print("/mpldict %d dict def"%Ndict, file=fh)
        print("mpldict begin", file=fh)
        for d in psDefs:
            d=d.strip()
            for l in d.split('\n'):
                print(l.strip(), file=fh)
        print("end", file=fh)
        print("%%EndProlog", file=fh)

        print("mpldict begin", file=fh)
        #print >>fh, "gsave"
        print("%s translate"%_nums_to_str(xo, yo), file=fh)
        print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)

        # write the figure
        print(self._pswriter.getvalue(), file=fh)

        # write the trailer
        #print >>fh, "grestore"
        print("end", file=fh)
        print("showpage", file=fh)
        fh.flush()

    if isLandscape:  # now we are ready to rotate
        isLandscape = True
        width, height = height, width
        bbox = (lly, llx, ury, urx)

    # set the paper size to the figure size if isEPSF. The
    # resulting ps file has the given size with correct bounding
    # box so that there is no need to call 'pstoeps'
    if isEPSF:
        paperWidth, paperHeight = self.figure.get_size_inches()
        if isLandscape:
            paperWidth, paperHeight = paperHeight, paperWidth
    else:
        temp_papertype = _get_papertype(width, height)
        if papertype=='auto':
            papertype = temp_papertype
            paperWidth, paperHeight = papersize[temp_papertype]
        else:
            paperWidth, paperHeight = papersize[papertype]
            if (width>paperWidth or height>paperHeight) and isEPSF:
                paperWidth, paperHeight = papersize[temp_papertype]
                verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')

    texmanager = ps_renderer.get_texmanager()
    font_preamble = texmanager.get_font_preamble()
    custom_preamble = texmanager.get_custom_preamble()

    # Run latex/dvips over the temp file to replace the psfrag tags with
    # real text; returns whether dvips rotated the page (see pstoeps).
    psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
                                     font_preamble,
                                     custom_preamble, paperWidth, paperHeight,
                                     orientation)

    if rcParams['ps.usedistiller'] == 'ghostscript':
        gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
                   rotated=psfrag_rotated)
    elif rcParams['ps.usedistiller'] == 'xpdf':
        xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
                     rotated=psfrag_rotated)
    elif rcParams['text.usetex']:
        if False: pass # for debugging
        else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
                         rotated=psfrag_rotated)

    # Deliver the result: stream into an open file object, or move the
    # temp file over the target path while preserving its permission bits.
    is_file = False
    if sys.version_info[0] >= 3:
        if isinstance(outfile, io.IOBase):
            is_file = True
    else:
        if isinstance(outfile, file):
            is_file = True
    if is_file:
        with open(tmpfile, 'rb') as fh:
            outfile.write(fh.read())
    else:
        # create the target first so its permission mode can be read and
        # re-applied after shutil.move (mkstemp files are mode 0600)
        with open(outfile, 'wb') as fh:
            pass
        mode = os.stat(outfile).st_mode
        shutil.move(tmpfile, outfile)
        os.chmod(outfile, mode)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
                    paperWidth, paperHeight, orientation):
    """
    When we want to use the LaTeX backend with postscript, we write PSFrag tags
    to a temporary postscript file, each one marking a position for LaTeX to
    render some text. convert_psfrags generates a LaTeX document containing the
    commands to convert those tags to text. LaTeX/dvips produces the postscript
    file that includes the actual text.

    Returns True when the dvips output appears to be in landscape mode
    (detected by scanning the generated file), so callers can fix up the
    bounding box in the pstoeps step.
    """
    tmpdir = os.path.split(tmpfile)[0]
    epsfile = tmpfile+'.eps'
    shutil.move(tmpfile, epsfile)
    latexfile = tmpfile+'.tex'
    outfile = tmpfile+'.output'
    dvifile = tmpfile+'.dvi'
    psfile = tmpfile+'.ps'

    if orientation=='landscape': angle = 90
    else: angle = 0

    if rcParams['text.latex.unicode']:
        unicode_preamble = r"""\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
    else:
        unicode_preamble = ''

    # Wrapper LaTeX document: sets the page geometry to the target paper
    # size, defines all psfrag substitutions, and includes the eps figure.
    s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
      paperWidth, paperHeight,
      '\n'.join(psfrags), angle, os.path.split(epsfile)[-1])

    with io.open(latexfile, 'wb') as latexh:
        if rcParams['text.latex.unicode']:
            latexh.write(s.encode('utf8'))
        else:
            try:
                latexh.write(s.encode('ascii'))
            except UnicodeEncodeError:
                verbose.report("You are using unicode and latex, but have "
                               "not enabled the matplotlib 'text.latex.unicode' "
                               "rcParam.", 'helpful')
                raise

    # the split drive part of the command is necessary for windows users with
    # multiple drives: 'cd' alone does not switch drives on Windows
    if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
    else: precmd = ''
    command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
              %(precmd, tmpdir, latexfile, outfile)
    verbose.report(command, 'debug')
    exit_status = os.system(command)

    with io.open(outfile, 'rb') as fh:
        if exit_status:
            raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
        else:
            verbose.report(fh.read(), 'debug')
    os.remove(outfile)

    command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
              os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
    verbose.report(command, 'debug')
    exit_status = os.system(command)

    with io.open(outfile, 'rb') as fh:
        if exit_status:
            # NOTE(review): fh.read() is bytes here; on Python 3 the
            # str + bytes concatenation below would itself raise
            # TypeError instead of this RuntimeError -- confirm.
            raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
        else:
            verbose.report(fh.read(), 'debug')
    os.remove(outfile)
    os.remove(epsfile)
    shutil.move(psfile, tmpfile)

    # check if the dvips created a ps in landscape paper. Somehow,
    # above latex+dvips results in a ps file in a landscape mode for a
    # certain figure sizes (e.g., 8.3in,5.8in which is a5). And the
    # bounding box of the final output got messed up. We check see if
    # the generated ps file is in landscape and return this
    # information. The return value is used in pstoeps step to recover
    # the correct bounding box. 2010-06-05 JJL
    with open(tmpfile) as fh:
        if "Landscape" in fh.read(1000):
            psfrag_rotated = True
        else:
            psfrag_rotated = False

    # keep the intermediate files only when debugging PS output
    if not debugPS:
        for fname in glob.glob(tmpfile+'.*'):
            os.remove(fname)

    return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
    """
    Use ghostscript's pswrite or epswrite device to distill a file.
    This yields smaller files without illegal encapsulated postscript
    operators. The output is low-level, converting text to outlines.

    *tmpfile* is rewritten in place. If *eps* is True the result is
    converted back to encapsulated postscript with pstoeps, restoring
    *bbox*/*rotated* when the ps2write device is available.
    """
    if eps: paper_option = "-dEPSCrop"
    else: paper_option = "-sPAPERSIZE=%s" % ptype

    psfile = tmpfile + '.ps'
    outfile = tmpfile + '.output'
    dpi = rcParams['ps.distiller.res']

    gs_exe = ps_backend_helper.gs_exe
    if ps_backend_helper.supports_ps2write: # gs version >= 9
        device_name = "ps2write"
    else:
        device_name = "pswrite"

    command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
               paper_option, psfile, tmpfile, outfile)
    verbose.report(command, 'debug')
    exit_status = os.system(command)

    with io.open(outfile, 'rb') as fh:
        if exit_status:
            # NOTE(review): fh.read() is bytes; on Python 3 this str+bytes
            # concatenation would raise TypeError before the RuntimeError
            # surfaces -- confirm intended behavior.
            raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
        else:
            verbose.report(fh.read(), 'debug')
    os.remove(outfile)
    os.remove(tmpfile)
    shutil.move(psfile, tmpfile)

    # While it is best if above steps preserve the original bounding
    # box, there seem to be cases when it is not. For those cases,
    # the original bbox can be restored during the pstoeps step.

    if eps:
        # For some versions of gs, above steps result in an ps file
        # where the original bbox is no more correct. Do not adjust
        # bbox for now.
        if ps_backend_helper.supports_ps2write:
            # for gs version >= 9 w/ ps2write device
            pstoeps(tmpfile, bbox, rotated=rotated)
        else:
            pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
    """
    Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
    This yields smaller files without illegal encapsulated postscript
    operators. This distiller is preferred, generating high-level postscript
    output that treats text as text.

    Parameters
    ----------
    tmpfile : str
        Path of the postscript file to distill; rewritten in place.
    eps : bool
        If True, convert the distilled output back to EPS with pstoeps.
    ptype : str
        Paper size name used when *eps* is False.
    bbox, rotated
        Accepted for signature compatibility with gs_distill; not used
        directly here.
    """
    pdffile = tmpfile + '.pdf'
    psfile = tmpfile + '.ps'
    outfile = tmpfile + '.output'
    if eps: paper_option = "-dEPSCrop"
    else: paper_option = "-sPAPERSIZE=%s" % ptype

    command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
    # the Windows gs wrapper rejects '='; '#' is its alternate separator
    if sys.platform == 'win32': command = command.replace('=', '#')
    verbose.report(command, 'debug')
    exit_status = os.system(command)
    with io.open(outfile, 'rb') as fh:
        if exit_status:
            # Bug fix: message previously read "\n\Here ..." -- the broken
            # escape sequence left a stray backslash in the error text.
            # NOTE(review): fh.read() is bytes; on Python 3 this str+bytes
            # concatenation raises TypeError -- confirm.
            raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
        else:
            verbose.report(fh.read(), 'debug')
    os.remove(outfile)

    command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
    verbose.report(command, 'debug')
    exit_status = os.system(command)
    with io.open(outfile, 'rb') as fh:
        if exit_status:
            raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
        else:
            verbose.report(fh.read(), 'debug')
    os.remove(outfile)
    os.remove(tmpfile)
    shutil.move(psfile, tmpfile)

    if eps:
        pstoeps(tmpfile)

    # clean up the intermediate .pdf/.output files
    for fname in glob.glob(tmpfile+'.*'):
        os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
    """
    Return a postscript header string for the given bbox lbrt=(l, b, r, t).

    The header holds both a ``%%BoundingBox`` (integer, right/top rounded
    up) and a ``%%HiResBoundingBox`` line. Additionally return the rotate
    command ('' when *rotated* is False).
    """
    left, bottom, right, top = lbrt
    if rotated:
        rotate = "%.2f %.2f translate\n90 rotate" % (left + right, 0)
    else:
        rotate = ""
    header_lines = [
        '%%%%BoundingBox: %d %d %d %d' % (left, bottom,
                                          np.ceil(right), np.ceil(top)),
        '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (left, bottom,
                                                       right, top),
    ]
    return '\n'.join(header_lines), rotate
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
    """
    Use ghostscript's bbox device to find the center of the bounding box. Return
    an appropriately sized bbox centered around that point. A bit of a hack.

    NOTE(review): this deprecated helper calls os.popen3, which exists only
    on Python 2; under Python 3 it would raise AttributeError. Confirm it is
    unused before porting.
    """
    outfile = tmpfile + '.output'
    if sys.platform == 'win32': gs_exe = 'gswin32c'
    else: gs_exe = 'gs'
    command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
              (gs_exe, tmpfile)

    verbose.report(command, 'debug')
    stdin, stdout, stderr = os.popen3(command)
    verbose.report(stdout.read(), 'debug-annoying')
    # the bbox device writes its report to stderr
    bbox_info = stderr.read()
    verbose.report(bbox_info, 'helpful')
    bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
    if bbox_found:
        bbox_info = bbox_found.group()
    else:
        raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
    l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]

    # this is a hack to deal with the fact that ghostscript does not return the
    # intended bbox, but a tight bbox. For now, we just center the ink in the
    # intended bbox. This is not ideal, users may intend the ink to not be
    # centered.
    if bbox is None:
        l, b, r, t = (l-1, b-1, r+1, t+1)
    else:
        x = (l+r)/2
        y = (b+t)/2
        dx = (bbox[2]-bbox[0])/2
        dy = (bbox[3]-bbox[1])/2
        l,b,r,t = (x-dx, y-dy, x+dx, y+dy)

    bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
    hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)

    return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
    """
    Convert the postscript to encapsulated postscript. The bbox of
    the eps file will be replaced with the given *bbox* argument. If
    None, original bbox will be used.

    The file is processed as bytes in two passes: the header is rewritten
    (EPSF magic, bounding box, EPS-safe prolog), then the body is copied
    and the trailer patched with the matching restore bookkeeping.
    """
    # if rotated==True, the output eps file need to be rotated
    if bbox:
        bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
    else:
        bbox_info, rotate = None, None

    epsfile = tmpfile + '.eps'
    with io.open(epsfile, 'wb') as epsh:
        write = epsh.write
        with io.open(tmpfile, 'rb') as tmph:
            line = tmph.readline()
            # Modify the header:
            while line:
                if line.startswith(b'%!PS'):
                    # replace the plain PS magic with the EPSF one and
                    # inject the requested bounding box right after it
                    write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
                    if bbox:
                        write(bbox_info.encode('ascii') + b'\n')
                elif line.startswith(b'%%EndComments'):
                    write(line)
                    # EPS-safe prolog: neutralize showpage/setpagedevice
                    # and record the dict/save state so the trailer can
                    # restore it
                    write(b'%%BeginProlog\n')
                    write(b'save\n')
                    write(b'countdictstack\n')
                    write(b'mark\n')
                    write(b'newpath\n')
                    write(b'/showpage {} def\n')
                    write(b'/setpagedevice {pop} def\n')
                    write(b'%%EndProlog\n')
                    write(b'%%Page 1 1\n')
                    if rotate:
                        write(rotate.encode('ascii') + b'\n')
                    break
                elif bbox and (line.startswith(b'%%Bound') \
                               or line.startswith(b'%%HiResBound') \
                               or line.startswith(b'%%DocumentMedia') \
                               or line.startswith(b'%%Pages')):
                    # drop the original bbox/paper comments; replacements
                    # were written above
                    pass
                else:
                    write(line)
                line = tmph.readline()
            # Now rewrite the rest of the file, and modify the trailer.
            # This is done in a second loop such that the header of the embedded
            # eps file is not modified.
            line = tmph.readline()
            while line:
                if line.startswith(b'%%Trailer'):
                    write(b'%%Trailer\n')
                    write(b'cleartomark\n')
                    write(b'countdictstack\n')
                    write(b'exch sub { end } repeat\n')
                    write(b'restore\n')
                    if rcParams['ps.usedistiller'] == 'xpdf':
                        # remove extraneous "end" operator:
                        line = tmph.readline()
                elif line.startswith(b'%%PageBoundingBox'):
                    pass
                else:
                    write(line)
                line = tmph.readline()

    os.remove(tmpfile)
    shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
    """Figure manager for the PS backend; no behavior beyond the base class."""
    pass


# backend API alias: the canonical name backends must export
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
#
# Each entry is written verbatim into the document prolog (leading
# whitespace is stripped line by line when emitted).
psDefs = [
    # x y *m* -
    "/m { moveto } bind def",
    # x y *l* -
    "/l { lineto } bind def",
    # x y *r* -
    "/r { rlineto } bind def",
    # x1 y1 x2 y2 x y *c* -
    "/c { curveto } bind def",
    # *closepath* -
    "/cl { closepath } bind def",
    # w h x y *box* -
    """/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
    # w h x y *clipbox* -
    """/clipbox {
box
clip
newpath
} bind def""",
]
| mit |
mayblue9/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample: two linearly separable 2-D clusters of three points each
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
# test points and their expected class labels
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]

# also load the iris dataset
# (shuffled with a fixed seed so that labels are not ordered and the
# fixture stays deterministic across test runs)
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    """Check the fitted attributes of a linear libsvm SVC on the toy data."""
    model = svm.SVC(kernel='linear').fit(X, Y)
    assert_array_equal(model.dual_coef_, [[-0.25, .25]])
    assert_array_equal(model.support_, [1, 3])
    assert_array_equal(model.support_vectors_, (X[1], X[3]))
    assert_array_equal(model.intercept_, [0.])
    assert_array_equal(model.predict(X), Y)
def test_libsvm_iris():
    # Check consistency on dataset iris.

    # shuffle the dataset so that labels are not ordered
    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
        # classes_ must be returned sorted regardless of label order in y
        assert_array_equal(clf.classes_, np.sort(clf.classes_))

    # check also the low-level API
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
    pred = svm.libsvm.predict(iris.data, *model)
    assert_greater(np.mean(pred == iris.target), .95)

    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
                           kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
    assert_greater(np.mean(pred == iris.target), .95)

    pred = svm.libsvm.cross_validation(iris.data,
                                       iris.target.astype(np.float64), 5,
                                       kernel='linear',
                                       random_seed=0)
    assert_greater(np.mean(pred == iris.target), .95)

    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = svm.libsvm.cross_validation(iris.data,
                                        iris.target.astype(np.float64), 5,
                                        kernel='linear',
                                        random_seed=0)
    assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
    """SVC and LinearSVC should accept a single sample given as a 1-d array."""
    for estimator in (svm.SVC(), svm.LinearSVC(random_state=0)):
        estimator.fit(X, Y)
        estimator.predict(X[0])
def test_precomputed():
    """SVC with a precomputed kernel, on the toy data and on iris.

    Bug fix: the final assertion previously re-checked a stale ``pred``
    left over from the precomputed-kernel classifier instead of predicting
    with the callable-kernel classifier that was just fitted.
    """
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # a Gram matrix with the wrong number of columns must be rejected
    assert_raises(ValueError, clf.predict, KT.T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])

    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)

    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])

    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # recompute the predictions with the callable-kernel classifier
    # before asserting (previously a stale pred was reused here)
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
    # Test Support Vector Regression

    diabetes = datasets.load_diabetes()
    # NOTE(review): svm.LinearSVR(C=10.) appears twice in this tuple; the
    # duplicate looks unintentional -- confirm whether a different
    # parametrization was meant.
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    """LinearSVR and SVR(kernel='linear') should give comparable results."""
    diabetes = datasets.load_diabetes()

    linear_reg = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    linear_score = linear_reg.score(diabetes.data, diabetes.target)

    kernel_reg = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data,
                                                     diabetes.target)
    kernel_score = kernel_reg.score(diabetes.data, diabetes.target)

    # coefficients and scores must agree to within 10%
    coef_gap = np.linalg.norm(linear_reg.coef_ - kernel_reg.coef_)
    assert coef_gap / np.linalg.norm(kernel_reg.coef_) < .1
    assert np.abs(linear_score - kernel_score) < 0.1
def test_svr_errors():
    """Predicting with a mis-shaped callable kernel must raise ValueError."""
    X_small = [[0.0], [1.0]]
    y_small = [0.0, 0.5]

    # kernel returning a matrix of the wrong shape
    bad_kernel = lambda x, y: np.array([[1.0]])
    reg = svm.SVR(kernel=bad_kernel)
    reg.fit(X_small, y_small)
    assert_raises(ValueError, reg.predict, X_small)
def test_oneclass():
    # Test OneClassSVM
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)

    # all three test points are labeled as outliers (-1)
    assert_array_almost_equal(pred, [-1, -1, -1])
    assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
    assert_array_almost_equal(clf.dual_coef_,
                              [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
                              decimal=3)
    # accessing coef_ on this (non-linear-kernel) model must raise
    assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
    # Test OneClassSVM decision function
    clf = svm.OneClassSVM()
    rnd = check_random_state(2)

    # Generate train data: two tight blobs around (2, 2) and (-2, -2)
    X = 0.3 * rnd.randn(100, 2)
    X_train = np.r_[X + 2, X - 2]

    # Generate some regular novel observations
    X = 0.3 * rnd.randn(20, 2)
    X_test = np.r_[X + 2, X - 2]
    # Generate some abnormal novel observations
    X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))

    # fit the model
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X_train)

    # predict things
    y_pred_test = clf.predict(X_test)
    assert_greater(np.mean(y_pred_test == 1), .9)
    y_pred_outliers = clf.predict(X_outliers)
    assert_greater(np.mean(y_pred_outliers == -1), .9)
    # the sign of decision_function must agree with predict on both sets
    dec_func_test = clf.decision_function(X_test)
    assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
    dec_func_outliers = clf.decision_function(X_outliers)
    assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
    """Changing dual_coef_ after fitting must change the predictions.

    We change clf.dual_coef_ at run time and expect .predict() to change
    accordingly. Notice that this is not trivial since it involves a lot
    of C/Python copying in the libsvm bindings; success ensures that the
    mapping between libsvm and the python classifier is complete.
    """
    model = svm.SVC(kernel='linear', C=1.0)
    model.fit(X, Y)
    assert_array_equal(model.dual_coef_, [[-.25, .25]])
    assert_array_equal(model.predict([[-.1, -.1]]), [1])
    # overwrite the internal dual coefficients at run time
    model._dual_coef_ = np.array([[.0, 1.]])
    assert_array_equal(model.predict([[-.1, -.1]]), [2])
def test_probability():
    # Predict probabilities using SVC
    # This uses cross validation, so we use a slightly bigger testing set.

    for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
                svm.NuSVC(probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)

        prob_predict = clf.predict_proba(iris.data)
        # each row of predict_proba must sum to one
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        # argmax of the probabilities should mostly agree with predict
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)

        # log-probabilities must be consistent with probabilities
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm

    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    # prediction must be the class on the positive side of the hyperplane
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary: decision values come from the kernel expansion
    # over the support vectors
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)

    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    # ovr: one column per class
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check shape of decision_function for 'ovo':
    # one column per pair of classes (5 choose 2 == 10)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))

    # check deprecation warning
    clf.decision_function_shape = None
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
    """SVR.decision_function (python side) must match the libsvm values."""
    data = iris.data
    target = iris.target

    # linear kernel: decision values are data @ coef.T + intercept
    reg = svm.SVR(kernel='linear', C=0.1).fit(data, target)
    expected = np.dot(data, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(expected.ravel(),
                              reg.decision_function(data).ravel())

    # rbf kernel: decision values come from the kernel expansion over SVs
    reg = svm.SVR(kernel='rbf', gamma=1).fit(data, target)
    kernel_values = rbf_kernel(data, reg.support_vectors_, gamma=reg.gamma)
    expected = np.dot(kernel_values, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(expected.ravel(),
                              reg.decision_function(data).ravel())
def test_weight():
    # Test class weights
    clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)

    # imbalanced problem (~83% / ~17% class distribution)
    X_, y_ = make_classification(n_samples=200, n_features=10,
                                 weights=[0.833, 0.167], random_state=2)

    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0), svm.SVC()):
        # up-weight the minority class; f1 on the held-out half must
        # clear a modest bar
        clf.set_params(class_weight={0: .1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])

    # heavily up-weighting the class-2 samples flips the prediction
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])

    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
    # Test class weights for imbalanced data
    from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels where a range [0..K).
    from sklearn.utils import compute_class_weight
    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])

    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight('balanced', classes, y[unbalanced])
    # the under-sampled class must receive the largest weight
    assert_true(np.argmax(class_weights) == 2)

    for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
                LogisticRegression()):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='weighted')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='weighted'))
def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)

    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        # the label slice is contiguous in neither layout
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels: X must be a square Gram matrix
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)

    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))

    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))

    # feature-count mismatch between fit and predict must raise
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
    """A sparse Gram matrix with kernel='precomputed' must raise TypeError."""
    gram = sparse.csr_matrix([[1, 0], [0, 1]])
    model = svm.SVC(kernel='precomputed')
    try:
        model.fit(gram, [0, 1])
    except TypeError as exc:
        # the error message should point at the sparse precomputed input
        assert_in("Sparse precomputed", str(exc))
    else:
        assert not "reached"
def test_linearsvc_parameters():
    """Exhaustively check loss/penalty/dual combinations in LinearSVC.

    Unsupported combinations (including the bogus 'foo'/'bar' values) must
    raise a ValueError naming the offending arguments; all others must fit.
    """
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]

    X, y = make_classification(n_samples=5, n_features=5)

    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        # combinations liblinear does not implement, plus invalid names
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):
            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)

    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    """Legacy loss aliases ('l1'/'l2', any case) must warn and keep working.

    Checks the exact DeprecationWarning message for LinearSVC
    (l1 -> hinge, l2 -> squared_hinge) and LinearSVR
    (l1 -> epsilon_insensitive, l2 -> squared_epsilon_insensitive).
    """
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1/L1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2/L2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
                         svm.LinearSVC(loss="L2").fit, X, y)

    # LinearSVR
    # loss l1/L1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("L1", "epsilon_insensitive", "loss='L1'",
                                "1.0"),
                         svm.LinearSVR(loss="L1").fit, X, y)

    # loss l2/L2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
    # Check if Upper case notation is supported by _fit_liblinear
    # which is called by fit
    """Uppercase loss/penalty spellings must warn and map to lowercase."""
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the uppercase notation will be removed in %s")

    # loss SQUARED_hinge --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
                         svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)

    # penalty L2 --> l2 (same message template with 'penalty' substituted)
    assert_warns_message(DeprecationWarning,
                         msg.replace("loss", "penalty")
                         % ("L2", "l2", "0.18"),
                         svm.LinearSVC(penalty="L2").fit, X, y)

    # loss EPSILON_INSENSITIVE --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
                                "0.18"),
                         svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
    """Basic LinearSVC sanity checks across penalty/loss/dual settings."""
    classifier = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert_true(classifier.fit_intercept)
    assert_array_equal(classifier.predict(T), true_result)
    assert_array_almost_equal(classifier.intercept_, [0], decimal=3)

    # the remaining penalty/loss/dual combinations should all reproduce
    # the expected predictions on the toy problem
    for params in ({'penalty': 'l1', 'loss': 'squared_hinge', 'dual': False},
                   {'penalty': 'l2', 'dual': True},
                   {'penalty': 'l2', 'loss': 'hinge', 'dual': True}):
        classifier = svm.LinearSVC(random_state=0, **params).fit(X, Y)
        assert_array_equal(classifier.predict(T), true_result)

    # test also decision function (on the last, l2/hinge/dual classifier)
    scores = classifier.decision_function(T)
    assert_array_equal((scores > 0).astype(np.int) + 1, true_result)
def test_linearsvc_crammer_singer():
    """Compare the crammer_singer formulation to one-vs-rest on iris."""
    ovr = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs.fit(iris.data, iris.target)

    # the two formulations should mostly agree in their predictions ...
    agreement = (ovr.predict(iris.data) == cs.predict(iris.data)).mean()
    assert_true(agreement > .9)

    # ... while learning genuinely different weight vectors
    assert_true((ovr.coef_ != cs.coef_).all())

    # predict is the argmax of the decision function
    scores = cs.decision_function(iris.data)
    assert_array_equal(cs.predict(iris.data), np.argmax(scores, axis=1))

    # and the decision function is the plain linear form X.w^T + b
    linear_form = np.dot(iris.data, cs.coef_.T) + cs.intercept_
    assert_array_almost_equal(linear_form, cs.decision_function(iris.data))
def test_crammer_singer_binary():
    """The Crammer-Singer multiclass formulation must handle binary data."""
    X, y = make_classification(n_classes=2, random_state=0)
    for with_intercept in (True, False):
        model = svm.LinearSVC(fit_intercept=with_intercept,
                              multi_class="crammer_singer",
                              random_state=0).fit(X, y)
        assert_greater(model.score(X, y), 0.9)
def test_linearsvc_iris():
    """LinearSVC on iris: symbolic class names, accuracy, argmax agreement."""
    labels = iris.target_names[iris.target]
    model = svm.LinearSVC(random_state=0).fit(iris.data, labels)

    # classes_ holds the symbolic names seen during fit
    assert_equal(set(model.classes_), set(iris.target_names))
    assert_greater(np.mean(model.predict(iris.data) == labels), 0.8)

    # predict must agree with the argmax of the decision function
    scores = model.decision_function(iris.data)
    assert_array_equal(iris.target_names[np.argmax(scores, 1)],
                       model.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """Dense liblinear must honour the intercept_scaling parameter.

    The comments below indicate that the intercept is regularized along
    with the weights, so it shrinks towards 0 unless intercept_scaling is
    large.  The three fits run in a fixed order and reuse `clf`: later
    assertions compare intercepts of successive refits.
    """
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    # defaults: scaling of 1, intercept fitted
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
    """Re-assigning copies of coef_/intercept_ must not change decisions."""
    # multi-class case
    model = svm.LinearSVC().fit(iris.data, iris.target)
    before = model.decision_function(iris.data)
    model.coef_ = model.coef_.copy()
    model.intercept_ = model.intercept_.copy()
    assert_array_almost_equal(before, model.decision_function(iris.data))

    # binary-class case
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    model = svm.LinearSVC().fit(X, y)
    before = model.decision_function(X)
    model.coef_ = model.coef_.copy()
    model.intercept_ = model.intercept_.copy()
    # binary case demands exact equality, not just almost-equality
    assert_array_equal(before, model.decision_function(X))
def test_immutable_coef_property():
    """Writes to the derived coef_ attribute must fail loudly, not silently."""
    fitted = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for model in fitted:
        # rebinding the attribute is blocked
        assert_raises(AttributeError, model.__setattr__, 'coef_',
                      np.arange(3))
        # in-place element writes are rejected as well
        assert_raises((RuntimeError, ValueError),
                      model.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    """Smoke-test LinearSVC(verbose=1): liblinear writes directly to fd 1.

    liblinear prints from C, so fd-level redirection (dup2) is required
    rather than swapping sys.stdout.  The original version leaked the
    duplicated stdout fd and both pipe fds, and did not restore stdout if
    fit() raised; the try/finally below fixes both issues.
    """
    # stdout: redirect
    import os
    stdout = os.dup(1)  # save original stdout
    read_end, write_end = os.pipe()
    os.dup2(write_end, 1)  # replace fd 1 with the pipe's write end
    try:
        # actual call
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        # stdout: restore, then close every descriptor we created so
        # repeated runs don't exhaust the fd table
        os.dup2(stdout, 1)
        os.close(stdout)
        os.close(read_end)
        os.close(write_end)
def test_svc_clone_with_callable_kernel():
    """clone() must survive a lambda kernel and match the builtin kernel."""
    linear_kernel = lambda x, y: np.dot(x, y.T)
    svm_callable = svm.SVC(kernel=linear_kernel,
                           probability=True, random_state=0,
                           decision_function_shape='ovr')
    # cloning exercises get_params/set_params with a lambda attribute
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)

    svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
                          decision_function_shape='ovr')
    svm_builtin.fit(iris.data, iris.target)

    # the clone with the callable kernel should match the builtin 'linear'
    # kernel on coefficients, predictions, probabilities and scores
    assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data),
                       svm_builtin.predict(iris.data))
    assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                              svm_builtin.predict_proba(iris.data),
                              decimal=4)
    assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                              svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    """A kernel callable returning a wrongly-shaped matrix must raise."""
    bad = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, bad.fit, X, Y)
def test_timeout():
    """max_iter=1 cannot converge, so fit must emit a ConvergenceWarning."""
    clf = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                  random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, clf.fit, X, Y)
def test_unfitted():
    """predict on an unfitted estimator raises, naming the class."""
    X = "foo!"  # input validation not required when SVM not fitted

    for estimator, name in ((svm.SVC(), 'SVC'), (svm.NuSVR(), 'NuSVR')):
        assert_raises_regexp(Exception,
                             r".*\b%s\b.*\bnot\b.*\bfitted\b" % name,
                             estimator.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    """Identically-seeded SVCs must yield identical probabilities."""
    def fit_and_score():
        return svm.SVC(probability=True, max_iter=1,
                       random_state=0).fit(X, Y).predict_proba(X)

    assert_array_almost_equal(fit_and_score(), fit_and_score())
def test_linear_svc_convergence_warnings():
    """With max_iter=2 liblinear cannot converge and must warn."""
    model = svm.LinearSVC(max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, model.fit, X, Y)
    # n_iter_ reports that the iteration cap was hit
    assert_equal(model.n_iter_, 2)
def test_svr_coef_sign():
    """Non-regression test for #2933: SVR coef_ must have the right sign.

    For every linear regressor, predict(X) has to equal X @ coef_ + b.
    """
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    regressors = [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                  svm.LinearSVR()]
    for reg in regressors:
        reg.fit(X, y)
        expected = np.dot(X, reg.coef_.ravel()) + reg.intercept_
        assert_array_almost_equal(reg.predict(X), expected)
def test_linear_svc_intercept_scaling():
    """intercept_scaling <= 0 must raise with an explanatory message."""
    for bad_value in [-1, 0]:
        model = svm.LinearSVC(intercept_scaling=bad_value)
        expected = ('Intercept scaling is %r but needs to be greater than 0.'
                    ' To disable fitting an intercept,'
                    ' set fit_intercept=False.' % model.intercept_scaling)
        assert_raise_message(ValueError, expected, model.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
    """With fit_intercept=False the fitted intercept is exactly zero."""
    model = svm.LinearSVC(fit_intercept=False)
    model.fit(X, Y)
    assert_equal(model.intercept_, 0.)
def test_hasattr_predict_proba():
    """predict_proba availability is switched by the `probability` param.

    hasattr must reflect the current value of `probability` both before
    and after fitting (presumably implemented via a property on SVC —
    verify in the estimator's source).  The statements below mutate one
    estimator in sequence, so their order matters.
    """
    G = svm.SVC(probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))

    G = svm.SVC(probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))

    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/metrics/tests/test_classification.py | 28 | 53546 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns ``(y_true, y_pred, probas_pred)`` for the held-out second half.
    NOTE(review): the RNG call order below is load-bearing — many tests in
    this file assert the exact scores it produces; do not reorder.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()

    X = dataset.data
    y = dataset.target

    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]

    n_samples, n_features = X.shape
    p = np.arange(n_samples)

    # deterministic shuffle of the samples (fixed seed 37)
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)

    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]

    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])

    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]

    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """Multilabel accuracy_score is exact-match (subset) accuracy."""
    # Dense label indicator matrix format
    ind_a = np.array([[0, 1, 1], [1, 0, 1]])
    ind_b = np.array([[0, 0, 1], [1, 0, 1]])

    assert_equal(accuracy_score(ind_a, ind_b), 0.5)
    assert_equal(accuracy_score(ind_a, ind_a), 1)
    assert_equal(accuracy_score(ind_b, ind_b), 1)
    assert_equal(accuracy_score(ind_b, np.logical_not(ind_b)), 0)
    assert_equal(accuracy_score(ind_a, np.logical_not(ind_a)), 0)
    assert_equal(accuracy_score(ind_a, np.zeros(ind_a.shape)), 0)
    assert_equal(accuracy_score(ind_b, np.zeros(ind_a.shape)), 0)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        seq_a = [(1, 2,), (0, 2,)]
        seq_b = [(2,), (0, 2,)]

        assert_equal(accuracy_score(seq_a, seq_b), 0.5)
        assert_equal(accuracy_score(seq_a, seq_a), 1)
        assert_equal(accuracy_score(seq_b, seq_b), 1)
        assert_equal(accuracy_score(seq_b, [(), ()]), 0)
        # un-normalized variant counts matching samples instead of averaging
        assert_equal(accuracy_score(seq_a, seq_b, normalize=False), 1)
        assert_equal(accuracy_score(seq_a, seq_a, normalize=False), 2)
        assert_equal(accuracy_score(seq_b, seq_b, normalize=False), 2)
        assert_equal(accuracy_score(seq_b, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
    """Precision/recall/F1 on a binary task, per class and averaged."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    assert_dep_warning = partial(assert_warns, DeprecationWarning)
    # my_assert both runs the metric and checks the warning behaviour
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings),
                              ({'average': 'micro'}, assert_dep_warning)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)

        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)

        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)

        # F-beta (beta=2) must match its closed-form definition
        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
    """P/R/F with only one class present (non-stratified CV can do this).

    All three metrics return 1 when only the positive class appears and 0
    when only the negative class appears.
    """
    for metric in (precision_score, recall_score, f1_score):
        assert_equal(1., metric([1, 1], [1, 1]))
        assert_equal(0., metric([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
    """Test handling of explicit additional (not in input) labels to PRF
    """
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]

    # run both the multiclass (i == 0) and binarized multilabel (i == 1)
    # representations of the same problem
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array for the labels absent from the input
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average=None)
        assert_array_almost_equal([0., 1., 1., .5, 0.], actual)

        # Macro average is changed (zeros for extra labels pull it down)
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average='macro')
        assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)

        # No effect otherwise
        for average in ['micro', 'weighted', 'samples']:
            if average == 'samples' and i == 0:
                # 'samples' is not defined for the plain multiclass lists
                continue
            assert_almost_equal(recall_score(y_true, y_pred,
                                             labels=[0, 1, 2, 3, 4],
                                             average=average),
                                recall_score(y_true, y_pred, labels=None,
                                             average=average))

    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, 'macro', 'micro', 'samples']:
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(6), average=average)
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
    """Test a subset of labels may be requested for PRF"""
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]

    # run both the multiclass and the binarized multilabel representation
    for i, (y_true, y_pred) in enumerate(data):
        # recall restricted to labels {1, 3} vs. recall over all labels
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)

        # per-label recalls: label 1 -> 1/2, label 3 -> 1/1
        assert_array_almost_equal([.5, 1.], recall_13(average=None))
        assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
        assert_almost_equal((.5 * 2 + 1. * 1) / 3,
                            recall_13(average='weighted'))
        assert_almost_equal(2. / 3, recall_13(average='micro'))

        # ensure the above were meaningful tests:
        for average in ['macro', 'weighted', 'micro']:
            assert_not_equal(recall_13(average=average),
                             recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must reject multiclass targets."""
    rng = check_random_state(404)
    # draw scores first, then labels, to keep the RNG stream unchanged
    scores = rng.rand(10)
    # y_true contains three different class values -> multiclass
    labels = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, labels, scores)
def test_average_precision_score_duplicate_values():
    """Tied scores that never cross the class boundary give a perfect AP.

    Duplicate values need different handling than when computing the AUC
    of a ROC, because the precision-recall curve is a decreasing curve.
    This situation corresponds to a perfect test statistic: every 0 is
    scored below every 1, so the average precision must be exactly 1.
    """
    labels = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    scores = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(labels, scores), 1)
def test_average_precision_score_tied_values():
    """Ties spanning both classes must keep the AP strictly below 1.

    Reading y_true left to right the 0s precede the 1s, which looks like
    a perfect ranking — but the first two samples share the score 0.5, so
    they could be swapped, creating an imperfect sorting.  That
    imperfection must show up in the final score.
    """
    labels = [0, 1, 1]
    scores = [.5, .5, .6]
    assert_not_equal(average_precision_score(labels, scores), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
    """Invalid beta, pos_label and average options must raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Bad beta: beta must be > 0
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, beta=0.0)

    # Bad pos_label: 2 is not among the binary labels
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, pos_label=2, average='macro')

    # Bad average option
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Binary confusion matrix, plus MCC recomputed from its cells."""
    y_true, y_pred, _ = make_prediction(binary=True)

    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])

        tp, fp, fn, tn = cm.flatten()
        # Matthews correlation coefficient from the raw counts
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)

    # integer and string labels must give identical results
    test(y_true, y_pred)
    test([str(y) for y in y_true],
         [str(y) for y in y_pred])
def test_cohen_kappa():
    """Cohen's kappa against hand-checked values from Artstein & Poesio."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    # kappa is symmetric in its two arguments
    assert_equal(kappa, cohen_kappa_score(y2, y1))

    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)

    # perfect agreement gives kappa of 1
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)

    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
    """Degenerate inputs must give an MCC of exactly 0.0, never NaN."""
    for truth, guess in (([0], [1]), ([0, 0], [0, 1])):
        assert_equal(matthews_corrcoef(truth, guess), 0.0)
def test_precision_recall_f1_score_multiclass():
    """Precision/recall/F1 on a multiclass task, all averaging modes."""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])

    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)

    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)

    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)

    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)

    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)

    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)

    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)

    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)

    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)

    # 'samples' averaging is rejected for (non-multilabel) multiclass input
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)

    # same prediction but with and explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
    """Multilabel PRF must accept an unsorted `labels` argument."""
    y_true = np.array([[1, 1, 0, 0]])
    y_pred = np.array([[0, 0, 1, 1]])
    for average in ['samples', 'micro', 'macro', 'weighted', None]:
        p, r, f, s = precision_recall_fscore_support(
            y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
        # prediction and truth are disjoint, so every score is zero
        for score in (p, r, f):
            assert_array_equal(score, 0)
        if average is None:
            # support follows the requested label order [3, 0, 1, 2]
            assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Smoke test: pos_label=None with weighted averaging (GH issue #1296)."""
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])

    # must run without raising even though pos_label is None
    precision_recall_fscore_support(y_true, y_pred,
                                    pos_label=None,
                                    average='weighted')
def test_zero_precision_recall():
    """Pathological disjoint predictions must give 0.0, never NaN."""
    # promote floating point warnings to errors so a NaN-producing
    # division would fail the test
    old_error_settings = np.seterr(all='raise')

    try:
        truth = np.array([0, 1, 2, 0, 1, 2])
        guess = np.array([2, 0, 1, 1, 2, 0])
        for scorer in (precision_score, recall_score, f1_score):
            assert_almost_equal(scorer(truth, guess, average='weighted'),
                                0.0, 2)
    finally:
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Multiclass confusion matrix with default and explicit label order."""
    y_true, y_pred, _ = make_prediction(binary=False)

    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])

        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = confusion_matrix(y_true,
                              y_pred,
                              labels=labels)
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])

    # run with both integer and string labels
    test(y_true, y_pred)
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
    """confusion_matrix restricted to a subset / reordering of labels."""
    y_true, y_pred, _ = make_prediction(binary=False)

    # only the first two labels are counted
    matrix = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(matrix, [[19, 4],
                                [4, 3]])

    # explicit ordering of a different subset of labels
    matrix = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(matrix, [[18, 2],
                                [24, 3]])
def test_classification_report_multiclass():
    """classification_report text output for the iris predictions.

    NOTE(review): the expected_report literals' column-alignment whitespace
    appears to have been stripped in this copy of the file — restore the
    exact literal from upstream before relying on these comparisons.
    """
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """classification_report with digits=5 for extra float precision.

    NOTE(review): the expected_report literals' column-alignment whitespace
    appears to have been stripped in this copy of the file — restore the
    exact literal from upstream before relying on these comparisons.
    """
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)

    # print classification report with label detection (default digits)
    expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """classification_report with string labels and with target_names.

    NOTE(review): the expected_report literals' column-alignment whitespace
    appears to have been stripped in this copy of the file — restore the
    exact literal from upstream before relying on these comparisons.
    """
    y_true, y_pred, _ = make_prediction(binary=False)

    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]

    # string labels are picked up automatically
    expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)

    # target_names overrides the displayed names
    expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """classification_report with non-ASCII labels; old NumPy must raise.

    NOTE(review): the expected_report literal's column-alignment whitespace
    appears to have been stripped in this copy of the file — restore the
    exact literal from upstream before relying on this comparison.
    """
    y_true, y_pred, _ = make_prediction(binary=False)

    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]

    expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    if np_version[:3] < (1, 7, 0):
        # old NumPy cannot searchsorted unicode arrays; expect a clear error
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
@ignore_warnings  # sequence of sequences is deprecated
def test_multilabel_classification_report():
    """classification_report on multilabel targets (both representations).

    NOTE(review): the expected_report literal's column-alignment whitespace
    appears to have been stripped in this copy of the file — restore the
    exact literal from upstream before relying on this comparison.
    """
    n_classes = 4
    n_samples = 50
    make_ml = make_multilabel_classification
    _, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
                           n_samples=n_samples)
    _, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
                           n_samples=n_samples)

    expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
    # binarize the label-list representation into an indicator matrix
    lb = MultiLabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)

    # both representations must render the same report
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Subset zero-one loss: a sample is an error unless its whole label
    set matches exactly."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])

    assert_equal(zero_one_loss(y1, y2), 0.5)
    assert_equal(zero_one_loss(y1, y1), 0)
    assert_equal(zero_one_loss(y2, y2), 0)
    assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
    assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
    assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
    assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        y1 = [(1, 2,), (0, 2,)]
        y2 = [(2,), (0, 2,)]

        assert_equal(zero_one_loss(y1, y2), 0.5)
        assert_equal(zero_one_loss(y1, y1), 0)
        assert_equal(zero_one_loss(y2, y2), 0)
        assert_equal(zero_one_loss(y2, [(), ()]), 1)
        assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
    """Hamming loss: fraction of individual label assignments that differ."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])

    # one of six label slots differs
    assert_equal(hamming_loss(y1, y2), 1 / 6)
    assert_equal(hamming_loss(y1, y1), 0)
    assert_equal(hamming_loss(y2, y2), 0)
    assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
    assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
    assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
    assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        y1 = [(1, 2,), (0, 2,)]
        y2 = [(2,), (0, 2,)]

        assert_equal(hamming_loss(y1, y2), 1 / 6)
        assert_equal(hamming_loss(y1, y1), 0)
        assert_equal(hamming_loss(y2, y2), 0)
        assert_equal(hamming_loss(y2, [(), ()]), 0.75)
        assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
        # explicit 11-class label space changes the denominator
        assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
                                         classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
    """Jaccard score: mean over samples of |inter| / |union| of label sets."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])

    # size(y1 \inter y2) = [1, 2]
    # size(y1 \union y2) = [2, 2]

    assert_equal(jaccard_similarity_score(y1, y2), 0.75)
    assert_equal(jaccard_similarity_score(y1, y1), 1)
    assert_equal(jaccard_similarity_score(y2, y2), 1)
    assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
    assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
    assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
    assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)

    with ignore_warnings():  # sequence of sequences is deprecated
        # List of tuple of label
        y1 = [(1, 2,), (0, 2,)]
        y2 = [(2,), (0, 2,)]

        assert_equal(jaccard_similarity_score(y1, y2), 0.75)
        assert_equal(jaccard_similarity_score(y1, y1), 1)
        assert_equal(jaccard_similarity_score(y2, y2), 1)
        assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)

        # disjoint label sets -> score 0
        # |y3 inter y4 | = [0, 0, 0]
        # |y3 union y4 | = [2, 2, 3]
        y3 = [(0,), (1,), (3,)]
        y4 = [(4,), (4,), (5, 6)]
        assert_almost_equal(jaccard_similarity_score(y3, y4), 0)

        # |y5 inter y6 | = [0, 1, 1]
        # |y5 union y6 | = [2, 1, 3]
        y5 = [(0,), (1,), (2, 3)]
        y6 = [(1,), (1,), (2, 0)]
        assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    # Test precision_recall_f1_score on a crafted multilabel example
    # First crafted example
    y_true_ll = [(0,), (1,), (2, 3)]
    y_pred_ll = [(1,), (1,), (2, 0)]
    lb = LabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)

    # the sequence and indicator representations must score identically
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)

        # per-label counts for this example:
        # tp = [0, 1, 1, 0]
        # fn = [1, 0, 0, 1]
        # fp = [1, 1, 0, 0]
        # Check per class
        assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
        assert_array_almost_equal(s, [1, 1, 1, 1], 2)

        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)

        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 1.5 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2.5 / 1.5 * 0.25)
        assert_equal(s, None)  # averaged results carry no support
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))

        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 0.5)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))

        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 1.5 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2.5 / 1.5 * 0.25)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))

        # Check samples
        # |h(x_i) inter y_i | = [0, 1, 1]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [1, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 0.5)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    # Test precision_recall_f1_score on a crafted multilabel example 2
    # Second crafted example
    y_true_ll = [(1,), (2,), (2, 3)]
    y_pred_ll = [(4,), (4,), (2, 1)]
    lb = LabelBinarizer()
    lb.fit([range(1, 5)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)

    # the sequence and indicator representations must score identically
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        # per-label counts for this example:
        # tp = [ 0.  1.  0.  0.]
        # fp = [ 1.  0.  0.  2.]
        # fn = [ 1.  1.  1.  0.]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)
        assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
        assert_array_almost_equal(s, [1, 2, 1, 0], 2)

        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)

        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 0.25)
        assert_almost_equal(r, 0.25)
        assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
        assert_equal(s, None)  # averaged results carry no support
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))

        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 0.25)
        assert_almost_equal(r, 0.125)
        assert_almost_equal(f, 2 / 12)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))

        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 2 / 4)
        assert_almost_equal(r, 1 / 4)
        assert_almost_equal(f, 2 / 3 * 2 / 4)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))

        # Check samples
        # |h(x_i) inter y_i | = [0, 0, 1]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [1, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 1 / 6)
        assert_almost_equal(r, 1 / 6)
        assert_almost_equal(f, 2 / 4 * 1 / 3)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
    """P/R/F must cope with a sample for which nothing was predicted."""
    y_true_ll = [(1,), (0,), (2, 1,)]
    y_pred_ll = [tuple(), (3,), (2, 1)]  # first sample predicts no labels
    lb = LabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)

    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        # per-label counts for this example:
        # true_pos = [ 0.  1.  1.  0.]
        # false_pos = [ 0.  0.  0.  1.]
        # false_neg = [ 1.  1.  0.  0.]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)
        assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
        assert_array_almost_equal(s, [1, 2, 1, 0], 2)

        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)

        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 1.5 / 4)
        assert_almost_equal(f, 2.5 / (4 * 1.5))
        assert_equal(s, None)  # averaged results carry no support
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))

        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 2 / 3)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))

        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 3 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, (2 / 1.5 + 1) / 4)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))

        # Check samples
        # |h(x_i) inter y_i | = [0, 0, 2]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [0, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 1 / 3)
        assert_almost_equal(r, 1 / 3)
        assert_almost_equal(f, 1 / 3)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.333, 2)
def test_precision_recall_f1_no_labels():
    """With no true and no predicted labels every score is 0 and the
    UndefinedMetricWarning must fire."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    # all counts are zero, so every metric is ill-defined:
    # tp = fn = fp = [0, 0, 0]; support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    for beta in [1]:
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)

        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)

        # every averaging mode must also warn and return 0
        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)

            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    """Each ill-defined P/R/F combination emits its specific
    UndefinedMetricWarning message."""
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message

    # average of per-label scores
    for average in [None, 'weighted', 'macro']:
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)

        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)

    # average of per-sample scores
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')

    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')

    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    """Recall warns only when there are no true samples (micro average)."""
    # all-negative predictions alone must stay silent
    assert_no_warnings(recall_score,
                       np.array([[1, 1], [1, 1]]),
                       np.array([[0, 0], [0, 0]]),
                       average='micro')

    clean_warning_registry()
    with warnings.catch_warnings(record=True) as captured:
        warnings.simplefilter('always')
        recall_score(np.array([[0, 0], [0, 0]]),
                     np.array([[1, 1], [1, 1]]),
                     average='micro')
        emitted = captured.pop()
        assert_equal(str(emitted.message),
                     'Recall is ill-defined and '
                     'being set to 0.0 due to no true samples.')
def test_precision_warnings():
    """Precision warns only when there are no predicted samples."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as captured:
        warnings.simplefilter('always')
        precision_score(np.array([[1, 1], [1, 1]]),
                        np.array([[0, 0], [0, 0]]),
                        average='micro')
        emitted = captured.pop()
        assert_equal(str(emitted.message),
                     'Precision is ill-defined and '
                     'being set to 0.0 due to no predicted samples.')

    # empty truth with all-positive predictions must stay silent
    assert_no_warnings(precision_score,
                       np.array([[0, 0], [0, 0]]),
                       np.array([[1, 1], [1, 1]]),
                       average='micro')
def test_fscore_warnings():
    """f1_score and fbeta_score emit the matching ill-defined warning for
    both the no-predicted and no-true cases."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')

        for score in [f1_score, partial(fbeta_score, beta=2)]:
            # no predicted samples
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            # pop() consumes the warning just emitted above
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            # no true samples
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
    # Ensure warning if f1_score et al.'s average is implicit for multiclass
    y_true = [1, 2, 3, 3]
    y_pred = [1, 2, 3, 1]
    y_true_bin = [0, 1, 1]
    y_pred_bin = [0, 1, 0]

    for metric in [precision_score, recall_score, f1_score,
                   partial(fbeta_score, beta=2)]:
        # implicit multiclass average deprecation-warns but still behaves
        # like average='weighted'
        score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
        score_weighted = assert_no_warnings(metric, y_true, y_pred,
                                            average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default')

        # check binary passes without warning
        assert_no_warnings(metric, y_true_bin, y_pred_bin)

        # but binary with pos_label=None should behave like multiclass
        score = assert_warns(DeprecationWarning, metric,
                             y_true_bin, y_pred_bin, pos_label=None)
        score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
                                            pos_label=None, average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default with '
                     'binary data and pos_label=None')
@ignore_warnings  # sequence of sequences is deprecated
def test__check_targets():
    # Check that _check_targets correctly merges target types, squeezes
    # output and fails if input lengths differ.

    # short aliases for the target-type strings
    IND = 'multilabel-indicator'
    SEQ = 'multilabel-sequences'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'

    # all of length 3
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (SEQ, [[2, 3], [1], [3]]),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]

    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (SEQ, SEQ): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,

        (IND, SEQ): None,
        (MC, SEQ): None,
        (BIN, SEQ): None,
        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,

        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (SEQ, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (SEQ, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (SEQ, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }

    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            # the table stores each unordered pair once; try the other order
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)

            if type1 != type2:
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)

        else:
            if type1 not in (BIN, MC, SEQ, IND):
                assert_raise_message(ValueError,
                                     "{0} is not supported".format(type1),
                                     _check_targets, y1, y2)
            else:
                merged_type, y1out, y2out = _check_targets(y1, y2)
                assert_equal(merged_type, expected)
                if merged_type.startswith('multilabel'):
                    # multilabel outputs are normalized to CSR matrices
                    assert_equal(y1out.format, 'csr')
                    assert_equal(y2out.format, 'csr')
                else:
                    assert_array_equal(y1out, np.squeeze(y1))
                    assert_array_equal(y2out, np.squeeze(y2))
                # mismatched lengths must always fail
                assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
    """Hinge loss on two binary encodings ({-1, 1} and {0, 2}) of the
    same decision values gives the same result."""
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    for labels in ([-1, 1, 1, -1], [0, 2, 2, 0]):
        assert_equal(hinge_loss(np.array(labels), pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
    """hinge_loss must match a hand-computed multiclass hinge reference."""
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])

    # per sample: 1 - score(true class) + score(rival class), floored at 0
    rival = [1, 2, 3, 2, 2, 3]
    margins = np.array([1 - pred_decision[i][t] + pred_decision[i][r]
                        for i, (t, r) in enumerate(zip(y_true, rival))])
    margins[margins <= 0] = 0
    expected = np.mean(margins)

    assert_equal(hinge_loss(y_true, pred_decision), expected)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
    """Without an explicit ``labels`` argument, hinge_loss must reject a
    decision matrix wider than the set of labels seen in y_true."""
    decisions = np.array([
        [1.27, 0.034, -0.68, -1.40],
        [-1.45, -0.58, -0.38, -0.17],
        [-2.36, -0.79, -0.27, 0.24],
        [-2.36, -0.79, -0.27, 0.24]
    ])
    truths = np.array([0, 1, 2, 2])  # label 3 never appears
    expected_error = ("Please include all labels in y_true "
                      "or pass labels as third argument")
    assert_raise_message(ValueError, expected_error,
                         hinge_loss, truths, decisions)
def test_hinge_loss_multiclass_with_missing_labels():
    """An explicit ``labels`` argument lets hinge_loss score decision
    matrices that include labels absent from y_true."""
    pred_decision = np.array([
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    labels = np.array([0, 1, 2, 3])  # label 3 supplied but never true
    # hand-computed per-sample margins, floored at 0
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][2] + pred_decision[4][3]
    ])
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
                 dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, 0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # hand-computed per-sample margins, floored at 0
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][3] + pred_decision[4][2],
        1 - pred_decision[5][2] + pred_decision[5][3]
    ])
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision),
                 dummy_hinge_loss)
def test_log_loss():
    """log_loss on binary/multiclass inputs, shape handling, eps clipping
    and input validation."""
    # binary case with symbolic labels ("no" < "yes")
    y_true = ["no", "no", "no", "yes", "yes", "yes"]
    y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                       [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.8817971)

    # multiclass case; adapted from http://bit.ly/RJJHWA
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    loss = log_loss(y_true, y_pred, normalize=True)
    assert_almost_equal(loss, 0.6904911)

    # check that we got all the shapes and axes right
    # by doubling the length of y_true and y_pred
    y_true *= 2
    y_pred *= 2
    loss = log_loss(y_true, y_pred, normalize=False)
    assert_almost_equal(loss, 0.6904911 * 6, decimal=6)

    # check eps and handling of absolute zero and one probabilities
    y_pred = np.asarray(y_pred) > .5
    loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
    assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))

    # raise error if number of classes are not equal.
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, y_true, y_pred)

    # case when y_true is a string array object
    y_true = ["ham", "spam", "spam", "ham"]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
    # Check brier_score_loss function
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    # reference: mean squared difference between labels and probabilities
    true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)

    assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
    assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
    # label encoding must not matter: shifted and {-1, 1} encodings score
    # the same
    assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
                        true_score)
    assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
                        true_score)
    # length mismatch and out-of-range probabilities are rejected
    assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
    assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
    assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/io/pytables.py | 9 | 156275 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, Timestamp)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.core.base import StringMixin
from pandas.core.common import adjoin, pprint_thing
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe
from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import _ensure_index
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
import pandas.core.common as com
from pandas.tools.merge import concat
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.io.common import PerformanceWarning
from pandas.core.config import get_option
from pandas.computation.pytables import Expr, maybe_expression
import pandas.lib as lib
import pandas.algos as algos
import pandas.tslib as tslib
from contextlib import contextmanager
from distutils.version import LooseVersion
# versioning attribute
# NOTE(review): presumably written into stored files so readers can detect
# the format vintage -- usage sites are outside this chunk, confirm there.
_version = '0.15.2'

### encoding ###
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
# ``Term`` is kept as an alias of the expression class so existing callers
# can keep constructing query terms under the old name.
Term = Expr


def _ensure_term(where, scope_level):
    """
    ensure that the where is a Term or a list of Term
    this makes sure that we are capturing the scope of variables
    that are passed
    create the terms here with a frame_level=2 (we are 2 levels down)
    """
    # only consider list/tuple here as an ndarray is automatically a coordinate
    # list
    level = scope_level + 1
    if isinstance(where, (list, tuple)):
        wlist = []
        for w in filter(lambda x: x is not None, where):
            if not maybe_expression(w):
                # non-expression entries (e.g. coordinates) pass through as-is
                wlist.append(w)
            else:
                wlist.append(Term(w, scope_level=level))
        where = wlist
    elif maybe_expression(where):
        where = Term(where, scope_level=level)
    return where
# Exception/warning types used throughout this module.  Raise sites are
# outside this chunk; the notes below stick to what the names and the
# module-level message templates establish.


class PossibleDataLossError(Exception):
    pass


class ClosedFileError(Exception):
    pass


class IncompatibilityWarning(Warning):
    pass

# message template for IncompatibilityWarning (%s: the offending version)
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""


class AttributeConflictWarning(Warning):
    pass

# message template for AttributeConflictWarning
# (%s slots: attribute name, existing value, new value)
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""


class DuplicateWarning(Warning):
    pass

# message emitted when a table contains duplicate entries
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""

# message template for PerformanceWarning
# (%s slots: inferred type, key, items)
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
# short and long spellings both map to the canonical storage format name
_FORMAT_MAP = {
    u('f'): 'fixed',
    u('fixed'): 'fixed',
    u('t'): 'table',
    u('table'): 'table',
}

format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""

# map object types
# pandas container class -> group-type string recorded in the file
_TYPE_MAP = {
    Series: u('series'),
    SparseSeries: u('sparse_series'),
    pd.TimeSeries: u('series'),
    DataFrame: u('frame'),
    SparseDataFrame: u('sparse_frame'),
    Panel: u('wide'),
    Panel4D: u('ndim'),
    SparsePanel: u('sparse_panel')
}

# storer class map
# group-type string -> name of the Fixed-format storer class to use
# (the 'Legacy*' entries read files written by very old pandas versions)
_STORER_MAP = {
    u('TimeSeries'): 'LegacySeriesFixed',
    u('Series'): 'LegacySeriesFixed',
    u('DataFrame'): 'LegacyFrameFixed',
    u('DataMatrix'): 'LegacyFrameFixed',
    u('series'): 'SeriesFixed',
    u('sparse_series'): 'SparseSeriesFixed',
    u('frame'): 'FrameFixed',
    u('sparse_frame'): 'SparseFrameFixed',
    u('wide'): 'PanelFixed',
    u('sparse_panel'): 'SparsePanelFixed',
}

# table class map
# table-type string -> name of the Table-format class to use
_TABLE_MAP = {
    u('generic_table'): 'GenericTable',
    u('appendable_series'): 'AppendableSeriesTable',
    u('appendable_multiseries'): 'AppendableMultiSeriesTable',
    u('appendable_frame'): 'AppendableFrameTable',
    u('appendable_multiframe'): 'AppendableMultiFrameTable',
    u('appendable_panel'): 'AppendablePanelTable',
    u('appendable_ndim'): 'AppendableNDimTable',
    u('worm'): 'WORMTable',
    u('legacy_frame'): 'LegacyFrameTable',
    u('legacy_panel'): 'LegacyPanelTable',
}

# axes map
# container class -> axis numbers treated as index axes for table storage
# NOTE(review): axis semantics inferred from names -- confirm at use sites
_AXES_MAP = {
    DataFrame: [0],
    Panel: [1, 2],
    Panel4D: [1, 2, 3],
}
# register our configuration options
from pandas.core import config

dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""

format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""

with config.config_prefix('io.hdf'):
    config.register_option('dropna_table', False, dropna_doc,
                           validator=config.is_bool)
    config.register_option(
        'default_format', None, format_doc,
        validator=config.is_one_of_factory(['fixed', 'table', None])
    )

# oh the troubles to reduce import time
# _table_mod caches the lazily imported PyTables module (see _tables);
# the policy flag is filled in on first import as well
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
    """Import PyTables lazily and cache the module.

    Returns the cached ``tables`` module, importing and validating it on
    first use.  Also records whether this PyTables build enforces the
    strict file-open policy (changed as of PyTables 3.1, depending on the
    HDF5 version).

    Raises
    ------
    ImportError
        If the installed PyTables is older than 3.0.0.
    """
    global _table_mod
    global _table_file_open_policy_is_strict

    if _table_mod is None:
        import tables
        _table_mod = tables

        # version requirements
        if LooseVersion(tables.__version__) < '3.0.0':
            raise ImportError("PyTables version >= 3.0.0 is required")

        # set the file open policy
        try:
            _table_file_open_policy_is_strict = (
                tables.file._FILE_OPEN_POLICY == 'strict')
        except AttributeError:
            # _FILE_OPEN_POLICY does not exist on this PyTables build; keep
            # the default.  (Previously a bare ``except:`` which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass

    return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
           append=None, **kwargs):
    """Store ``value`` under ``key``; open and close the store if given a
    path, otherwise write into the store object supplied."""
    def _write(store):
        # append vs. overwrite is decided by the caller's ``append`` flag
        if append:
            store.append(key, value, **kwargs)
        else:
            store.put(key, value, **kwargs)

    if isinstance(path_or_buf, string_types):
        with HDFStore(path_or_buf, mode=mode, complevel=complevel,
                      complib=complib) as store:
            _write(store)
    else:
        _write(path_or_buf)
def read_hdf(path_or_buf, key=None, **kwargs):
    """ read from the store, close it if we opened it

    Retrieve pandas object stored in file, optionally based on where
    criteria

    Parameters
    ----------
    path_or_buf : path (string), or HDFStore buffer to read from
    key : group identifier in the store. Can be omitted if a HDF file
        contains a single pandas object.
    where : list of Term (or convertible) objects, optional
    start : optional, integer (defaults to None), row number to start
        selection
    stop : optional, integer (defaults to None), row number to stop
        selection
    columns : optional, a list of columns that if not None, will limit the
        return columns
    iterator : optional, boolean, return an iterator, default False
    chunksize : optional, nrows to include in iteration, return an iterator

    Returns
    -------
    The selected object
    """
    # grab the scope: query terms must capture variables from the caller
    if 'where' in kwargs:
        kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)

    if isinstance(path_or_buf, string_types):
        try:
            exists = os.path.exists(path_or_buf)

        # if filepath is too long, os.path.exists itself can raise
        except (TypeError, ValueError):
            exists = False

        if not exists:
            raise IOError('File %s does not exist' % path_or_buf)

        # can't auto open/close if we are using an iterator
        # so delegate to the iterator
        # NOTE(review): all kwargs (including selection kwargs like 'where')
        # are forwarded to the HDFStore constructor here -- confirm that
        # HDFStore.open tolerates them.
        store = HDFStore(path_or_buf, **kwargs)
        auto_close = True

    elif isinstance(path_or_buf, HDFStore):
        if not path_or_buf.is_open:
            raise IOError('The HDFStore must be open for reading.')

        store = path_or_buf
        auto_close = False

    else:
        raise NotImplementedError('Support for generic buffers has not been '
                                  'implemented.')

    try:
        if key is None:
            keys = store.keys()
            if len(keys) != 1:
                raise ValueError('key must be provided when HDF file contains '
                                 'multiple datasets.')
            key = keys[0]
        return store.select(key, auto_close=auto_close, **kwargs)
    except:
        # on any failure, close the store we may have opened, then re-raise;
        # the bare except is deliberate so even BaseException triggers cleanup
        try:
            store.close()
        except:
            pass
        raise
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 1-9, default 0
If a complib is specified compression will be applied
where possible
complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel is > 0 apply compression to objects written
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> bar = DataFrame(randn(10, 4))
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
             fletcher32=False, **kwargs):
    """Open (or create) the HDF5 file at ``path``.

    Parameters mirror the class docstring; any extra ``kwargs`` are
    forwarded to :meth:`open`.  Raises ImportError when PyTables is not
    installed and ValueError for an unsupported ``complib``.
    """
    try:
        import tables  # noqa: availability check only
    except ImportError as ex:  # pragma: no cover
        raise ImportError('HDFStore requires PyTables, "{ex}" problem '
                          'importing'.format(ex=str(ex)))

    if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'):
        # BUGFIX: the original message was missing the opening quote on
        # 'lzo' ("..., lzo' or ...")
        raise ValueError("complib only supports 'blosc', 'bzip2', 'lzo' "
                         "or 'zlib' compression.")

    self._path = path
    if mode is None:
        mode = 'a'  # default: append/read-write, creating if necessary
    self._mode = mode
    self._handle = None
    self._complevel = complevel
    self._complib = complib
    self._fletcher32 = fletcher32
    self._filters = None

    self.open(mode=mode, **kwargs)
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
self._check_if_open()
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existance of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
have the leading '/'
"""
return [n._v_pathname for n in self.groups()]
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complib is not None:
if self._complevel is None:
self._complevel = 9
self._filters = _tables().Filters(self._complevel,
self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError("PyTables [{version}] no longer supports opening multiple files\n"
"even in read-only mode on this HDF5 version [{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 which allows\n"
"files to be opened multiple times at once\n".format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
# trying to read from a non-existant file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : type of object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns, **kwargs)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start,
stop=stop, iterator=iterator, chunksize=chunksize,
auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s,selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
# axis is the concentation axes
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of coordinates here
objs = [t.read(where=_where, columns=columns, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False).consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows, start=start,
stop=stop, iterator=iterator, chunksize=chunksize,
auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
if s is None:
raise KeyError('No object named %s in the file' % key)
# remove the node
if where is None and start is None and stop is None:
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns to create as data columns, or True to
use all columns
min_itemsize : dict of columns that specify minimum string sizes
nan_rep : string to use as string nan represenation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.ix[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex_axis(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Paramaters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table')))
]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
return None
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
# if we are a writer, determin the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
# distiguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
# we don't want to store a table node at all if are object is 0-len
# as there are not dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
def get_store(path, **kwargs):
    """Backwards-compatible alias: open and return an ``HDFStore``."""
    store = HDFStore(path, **kwargs)
    return store
class TableIterator(object):
    """ define the iteration interface on a table
    Parameters
    ----------
    store : the reference store
    s     : the refered storer
    func  : the function to execute the query
    where : the where of the query
    nrows : the rows to iterate on
    start : the passed start value (default is None)
    stop  : the passed stop value (default is None)
    iterator : boolean, whether to use the default iterator
    chunksize : the passed chunking value (default is 50000)
    auto_close : boolean, automatically close the store at the end of
        iteration, default is False
    kwargs : the passed kwargs
    """
    def __init__(self, store, s, func, where, nrows, start=None, stop=None,
                 iterator=False, chunksize=None, auto_close=False):
        self.store = store
        self.s = s
        self.func = func
        self.where = where
        # normalize bounds: default to the full [0, nrows) range and clip
        # stop at nrows so callers cannot iterate past the table end
        self.nrows = nrows or 0
        self.start = start or 0
        if stop is None:
            stop = self.nrows
        self.stop = min(self.nrows, stop)
        # filled in by get_result() when chunked iteration is requested
        self.coordinates = None
        if iterator or chunksize is not None:
            if chunksize is None:
                chunksize = 100000
            self.chunksize = int(chunksize)
        else:
            self.chunksize = None
        self.auto_close = auto_close
    def __iter__(self):
        """Yield chunks of ``chunksize`` rows; requires that get_result()
        has populated ``self.coordinates`` first."""
        # iterate
        current = self.start
        while current < self.stop:
            stop = min(current + self.chunksize, self.stop)
            value = self.func(None, None, self.coordinates[current:stop])
            current = stop
            # skip empty chunks rather than yielding them
            if value is None or not len(value):
                continue
            yield value
        self.close()
    def close(self):
        """Close the parent store, but only if auto_close was requested."""
        if self.auto_close:
            self.store.close()
    def get_result(self, coordinates=False):
        """Return either self (chunked iteration) or the full query result."""
        # return the actual iterator
        if self.chunksize is not None:
            if not self.s.is_table:
                raise TypeError(
                    "can only use an iterator or chunksize on a table")
            self.coordinates = self.s.read_coordinates(where=self.where)
            return self
        # if specified, read via coordinates (necessary for multiple selections)
        if coordinates:
            where = self.s.read_coordinates(where=self.where)
        else:
            where = self.where
        # directly return the result
        results = self.func(self.start, self.stop, where)
        self.close()
        return results
class IndexCol(StringMixin):
    """ an index column description class
    Parameters
    ----------
    axis   : axis which I reference
    values : the ndarray like converted values
    kind   : a string description of this type
    typ    : the pytables type
    pos    : the position in the pytables
    """
    is_an_indexable = True
    is_data_indexable = True
    _info_fields = ['freq', 'tz', 'index_name']
    def __init__(self, values=None, kind=None, typ=None, cname=None,
                 itemsize=None, name=None, axis=None, kind_attr=None,
                 pos=None, freq=None, tz=None, index_name=None, **kwargs):
        self.values = values
        self.kind = kind
        self.typ = typ
        self.itemsize = itemsize
        self.name = name
        self.cname = cname
        self.kind_attr = kind_attr
        self.axis = axis
        self.pos = pos
        self.freq = freq
        self.tz = tz
        self.index_name = index_name
        # table / meta / metadata are attached later (set_table, infer, ...)
        self.table = None
        self.meta = None
        self.metadata = None
        if name is not None:
            self.set_name(name, kind_attr)
        if pos is not None:
            self.set_pos(pos)
    def set_name(self, name, kind_attr=None):
        """ set the name of this indexer """
        self.name = name
        self.kind_attr = kind_attr or "%s_kind" % name
        if self.cname is None:
            self.cname = name
        return self
    def set_axis(self, axis):
        """ set the axis over which I index """
        self.axis = axis
        return self
    def set_pos(self, pos):
        """ set the position of this column in the Table """
        self.pos = pos
        if pos is not None and self.typ is not None:
            self.typ._v_pos = pos
        return self
    def set_table(self, table):
        """ set the table on which this column lives """
        self.table = table
        return self
    def __unicode__(self):
        temp = tuple(
            map(pprint_thing,
                (self.name,
                 self.cname,
                 self.axis,
                 self.pos,
                 self.kind)))
        return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
    def __eq__(self, other):
        """ compare 2 col items """
        return all([getattr(self, a, None) == getattr(other, a, None)
                    for a in ['name', 'cname', 'axis', 'pos']])
    def __ne__(self, other):
        return not self.__eq__(other)
    @property
    def is_indexed(self):
        """ return whether I am an indexed column """
        try:
            return getattr(self.table.cols, self.cname).is_indexed
        except:
            # table unset or column missing -> not indexed
            # (bug fix: the bare expression ``False`` was previously not
            # returned, so the property silently yielded None)
            return False
    def copy(self):
        """ return a shallow copy of myself """
        new_self = copy.copy(self)
        return new_self
    def infer(self, handler):
        """infer this column from the table: create and return a new object"""
        table = handler.table
        new_self = self.copy()
        new_self.set_table(table)
        new_self.get_attr()
        new_self.read_metadata(handler)
        return new_self
    def convert(self, values, nan_rep, encoding):
        """ set the values from this selection: take = take ownership """
        try:
            values = values[self.cname]
        except:
            # values is already a plain array (not a recarray-like)
            pass
        values = _maybe_convert(values, self.kind, encoding)
        kwargs = dict()
        if self.freq is not None:
            kwargs['freq'] = _ensure_decoded(self.freq)
        if self.index_name is not None:
            kwargs['name'] = _ensure_decoded(self.index_name)
        try:
            self.values = Index(values, **kwargs)
        except:
            # if the output freq is different that what we recorded,
            # it should be None (see also 'doc example part 2')
            if 'freq' in kwargs:
                kwargs['freq'] = None
            self.values = Index(values, **kwargs)
        self.values = _set_tz(self.values, self.tz)
        return self
    def take_data(self):
        """ return the values & release the memory """
        self.values, values = None, self.values
        return values
    @property
    def attrs(self):
        # the pytables attribute set of my table
        return self.table._v_attrs
    @property
    def description(self):
        return self.table.description
    @property
    def col(self):
        """ return my current col description """
        return getattr(self.description, self.cname, None)
    @property
    def cvalues(self):
        """ return my cython values """
        return self.values
    def __iter__(self):
        return iter(self.values)
    def maybe_set_size(self, min_itemsize=None, **kwargs):
        """ maybe set a string col itemsize:
            min_itemsize can be an integer or a dict with this column's name
            with an integer size """
        if _ensure_decoded(self.kind) == u('string'):
            if isinstance(min_itemsize, dict):
                min_itemsize = min_itemsize.get(self.name)
            # only grow the column; never shrink an existing itemsize
            if min_itemsize is not None and self.typ.itemsize < min_itemsize:
                self.typ = _tables(
                ).StringCol(itemsize=min_itemsize, pos=self.pos)
    def validate(self, handler, append, **kwargs):
        self.validate_names()
    def validate_names(self):
        # no name constraints for a plain index column
        pass
    def validate_and_set(self, handler, append, **kwargs):
        """ full validation pipeline, then persist my attributes """
        self.set_table(handler.table)
        self.validate_col()
        self.validate_attr(append)
        self.validate_metadata(handler)
        self.write_metadata(handler)
        self.set_attr()
    def validate_col(self, itemsize=None):
        """ validate this column: return the compared against itemsize """
        # validate this column for string truncation (or reset to the max size)
        if _ensure_decoded(self.kind) == u('string'):
            c = self.col
            if c is not None:
                if itemsize is None:
                    itemsize = self.itemsize
                if c.itemsize < itemsize:
                    raise ValueError(
                        "Trying to store a string with len [%s] in [%s] "
                        "column but\nthis column has a limit of [%s]!\n"
                        "Consider using min_itemsize to preset the sizes on "
                        "these columns" % (itemsize, self.cname, c.itemsize))
                return c.itemsize
        return None
    def validate_attr(self, append):
        # check for backwards incompatibility
        if append:
            existing_kind = getattr(self.attrs, self.kind_attr, None)
            if existing_kind is not None and existing_kind != self.kind:
                raise TypeError("incompatible kind in col [%s - %s]" %
                                (existing_kind, self.kind))
    def update_info(self, info):
        """ set/update the info for this indexable with the key/value
            if there is a conflict raise/warn as needed """
        for key in self._info_fields:
            value = getattr(self, key, None)
            idx = _get_info(info, self.name)
            existing_value = idx.get(key)
            if key in idx and value is not None and existing_value != value:
                # frequency/name just warn
                if key in ['freq', 'index_name']:
                    ws = attribute_conflict_doc % (key, existing_value, value)
                    warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
                    # reset
                    idx[key] = None
                    setattr(self, key, None)
                else:
                    raise ValueError(
                        "invalid info for [%s] for [%s], existing_value [%s] "
                        "conflicts with new value [%s]"
                        % (self.name, key, existing_value, value))
            else:
                if value is not None or existing_value is not None:
                    idx[key] = value
        return self
    def set_info(self, info):
        """ set my state from the passed info """
        idx = info.get(self.name)
        if idx is not None:
            self.__dict__.update(idx)
    def get_attr(self):
        """ set the kind for this column """
        self.kind = getattr(self.attrs, self.kind_attr, None)
    def set_attr(self):
        """ set the kind for this column """
        setattr(self.attrs, self.kind_attr, self.kind)
    def read_metadata(self, handler):
        """ retrieve the metadata for this column """
        self.metadata = handler.read_metadata(self.cname)
    def validate_metadata(self, handler):
        """ validate that kind=category does not change the categories """
        if self.meta == 'category':
            new_metadata = self.metadata
            cur_metadata = handler.read_metadata(self.cname)
            if new_metadata is not None and cur_metadata is not None \
                    and not com.array_equivalent(new_metadata, cur_metadata):
                raise ValueError("cannot append a categorical with different categories"
                                 " to the existing")
    def write_metadata(self, handler):
        """ set the meta data """
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
    """Index column with no physical representation in the table.

    Used for tables written without an explicit pandas index; the index is
    synthesized as a plain integer range on read, so nothing is stored in
    (or read from) the HDF5 attributes.
    """
    @property
    def is_indexed(self):
        # a synthetic index is never backed by a PyTables index
        return False
    def convert(self, values, nan_rep, encoding):
        """Materialize the index as 0..nrows-1 (the *values* argument is ignored)."""
        row_count = self.table.nrows
        self.values = Int64Index(np.arange(row_count))
        return self
    def get_attr(self):
        # nothing is persisted for a generic index, so nothing to read
        pass
    def set_attr(self):
        # nothing to persist
        pass
class DataCol(IndexCol):
    """ a data holding column, by definition this is not indexable

    Parameters
    ----------
    data : the actual data
    cname : the column name in the table to hold the data (typically
        values)
    meta : a string description of the metadata
    metadata : the actual metadata
    """
    is_an_indexable = False
    is_data_indexable = False
    # extra per-column fields carried in the table's ``info`` dict
    _info_fields = ['tz','ordered']

    @classmethod
    def create_for_block(
            cls, i=None, name=None, cname=None, version=None, **kwargs):
        """ return a new datacol with the block i """

        if cname is None:
            cname = name or 'values_block_%d' % i
        if name is None:
            name = cname

        # prior to 0.10.1, we named values blocks like: values_block_0 an the
        # name values_0
        try:
            if version[0] == 0 and version[1] <= 10 and version[2] == 0:
                m = re.search("values_block_(\d+)", name)
                if m:
                    name = "values_%s" % m.groups()[0]
        except:
            pass

        return cls(name=name, cname=cname, **kwargs)

    def __init__(self, values=None, kind=None, typ=None,
                 cname=None, data=None, meta=None, metadata=None, block=None, **kwargs):
        super(DataCol, self).__init__(
            values=values, kind=kind, typ=typ, cname=cname, **kwargs)
        # dtype / meta are persisted as node attributes named per-column
        self.dtype = None
        self.dtype_attr = u("%s_dtype" % self.name)
        self.meta = meta
        self.meta_attr = u("%s_meta" % self.name)
        self.set_data(data)
        self.set_metadata(metadata)

    def __unicode__(self):
        temp = tuple(
            map(pprint_thing,
                (self.name,
                 self.cname,
                 self.dtype,
                 self.kind,
                 self.shape)))
        return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp

    def __eq__(self, other):
        """ compare 2 col items """
        return all([getattr(self, a, None) == getattr(other, a, None)
                    for a in ['name', 'cname', 'dtype', 'pos']])

    def set_data(self, data, dtype=None):
        """ set my data (and derive dtype/kind when not already known) """
        self.data = data
        if data is not None:
            if dtype is not None:
                self.dtype = dtype
                self.set_kind()
            elif self.dtype is None:
                self.dtype = data.dtype.name
                self.set_kind()

    def take_data(self):
        """ return the data & release the memory """
        self.data, data = None, self.data
        return data

    def set_metadata(self, metadata):
        """ record the metadata """
        # metadata is stored flattened (e.g. categorical categories)
        if metadata is not None:
            metadata = np.array(metadata,copy=False).ravel()
        self.metadata = metadata

    def set_kind(self):
        # set my kind if we can; kind is a coarse string classification of
        # the stored dtype used when reading back / validating appends
        if self.dtype is not None:
            dtype = _ensure_decoded(self.dtype)

            if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
                self.kind = 'string'
            elif dtype.startswith(u('float')):
                self.kind = 'float'
            elif dtype.startswith(u('complex')):
                self.kind = 'complex'
            elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
                self.kind = 'integer'
            elif dtype.startswith(u('date')):
                self.kind = 'datetime'
            elif dtype.startswith(u('timedelta')):
                self.kind = 'timedelta'
            elif dtype.startswith(u('bool')):
                self.kind = 'bool'
            else:
                raise AssertionError(
                    "cannot interpret dtype of [%s] in [%s]" % (dtype, self))

            # set my typ if we need
            if self.typ is None:
                self.typ = getattr(self.description, self.cname, None)

    def set_atom(self, block, block_items, existing_col, min_itemsize,
                 nan_rep, info, encoding=None, **kwargs):
        """ create and setup my atom from the block b """

        self.values = list(block_items)

        # short-cut certain block types
        if block.is_categorical:
            return self.set_atom_categorical(block, items=block_items, info=info)
        elif block.is_datetimetz:
            return self.set_atom_datetime64tz(block, info=info)
        elif block.is_datetime:
            return self.set_atom_datetime64(block)
        elif block.is_timedelta:
            return self.set_atom_timedelta64(block)
        elif block.is_complex:
            return self.set_atom_complex(block)

        dtype = block.dtype.name
        inferred_type = lib.infer_dtype(block.values)

        if inferred_type == 'date':
            raise TypeError(
                "[date] is not implemented as a table column")
        elif inferred_type == 'datetime':
            # after 8260
            # this only would be hit for a multi-timezone dtype
            # which is an error
            raise TypeError(
                "too many timezones in this block, create separate "
                "data columns"
            )
        elif inferred_type == 'unicode':
            raise TypeError(
                "[unicode] is not implemented as a table column")

        # this is basically a catchall; if say a datetime64 has nans then will
        # end up here ###
        elif inferred_type == 'string' or dtype == 'object':
            self.set_atom_string(
                block, block_items,
                existing_col,
                min_itemsize,
                nan_rep,
                encoding)

        # set as a data block
        else:
            self.set_atom_data(block)

    def get_atom_string(self, block, itemsize):
        # one fixed-width string per row of the block
        return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])

    def set_atom_string(self, block, block_items, existing_col, min_itemsize,
                        nan_rep, encoding):
        # fill nan items with myself, don't disturb the blocks by
        # trying to downcast
        block = block.fillna(nan_rep, downcast=False)
        if isinstance(block, list):
            block = block[0]
        data = block.values

        # see if we have a valid string type
        inferred_type = lib.infer_dtype(data.ravel())
        if inferred_type != 'string':

            # we cannot serialize this data, so report an exception on a column
            # by column basis
            for i, item in enumerate(block_items):

                col = block.iget(i)
                inferred_type = lib.infer_dtype(col.ravel())
                if inferred_type != 'string':
                    raise TypeError(
                        "Cannot serialize the column [%s] because\n"
                        "its data contents are [%s] object dtype"
                        % (item, inferred_type)
                    )

        # itemsize is the maximum length of a string (along any dimension)
        data_converted = _convert_string_array(data, encoding)
        itemsize = data_converted.itemsize

        # specified min_itemsize?
        if isinstance(min_itemsize, dict):
            min_itemsize = int(min_itemsize.get(
                self.name) or min_itemsize.get('values') or 0)
        itemsize = max(min_itemsize or 0, itemsize)

        # check for column in the values conflicts
        if existing_col is not None:
            eci = existing_col.validate_col(itemsize)
            if eci > itemsize:
                itemsize = eci

        self.itemsize = itemsize
        self.kind = 'string'
        self.typ = self.get_atom_string(block, itemsize)
        self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))

    def get_atom_coltype(self, kind=None):
        """ return the PyTables column class for this column """
        if kind is None:
            kind = self.kind
        if self.kind.startswith('uint'):
            col_name = "UInt%sCol" % kind[4:]
        else:
            col_name = "%sCol" % kind.capitalize()

        return getattr(_tables(), col_name)

    def get_atom_data(self, block, kind=None):
        return self.get_atom_coltype(kind=kind)(shape=block.shape[0])

    def set_atom_complex(self, block):
        self.kind = block.dtype.name
        # e.g. 'complex128' -> 16 bytes per element
        itemsize = int(self.kind.split('complex')[-1]) // 8
        self.typ = _tables().ComplexCol(itemsize=itemsize, shape=block.shape[0])
        self.set_data(block.values.astype(self.typ.type, copy=False))

    def set_atom_data(self, block):
        self.kind = block.dtype.name
        self.typ = self.get_atom_data(block)
        self.set_data(block.values.astype(self.typ.type, copy=False))

    def set_atom_categorical(self, block, items, info=None, values=None):
        # currently only supports a 1-D categorical
        # in a 1-D block

        values = block.values
        codes = values.codes
        self.kind = 'integer'
        self.dtype = codes.dtype.name
        if values.ndim > 1:
            raise NotImplementedError("only support 1-d categoricals")
        if len(items) > 1:
            raise NotImplementedError("only support single block categoricals")

        # write the codes; must be in a block shape
        self.ordered = values.ordered
        self.typ = self.get_atom_data(block, kind=codes.dtype.name)
        self.set_data(_block_shape(codes))

        # write the categories
        self.meta = 'category'
        self.set_metadata(block.values.categories)

        # update the info
        self.update_info(info)

    def get_atom_datetime64(self, block):
        # datetimes are stored as int64 nanosecond epochs
        return _tables().Int64Col(shape=block.shape[0])

    def set_atom_datetime64(self, block, values=None):
        self.kind = 'datetime64'
        self.typ = self.get_atom_datetime64(block)
        if values is None:
            values = block.values.view('i8')
        self.set_data(values, 'datetime64')

    def set_atom_datetime64tz(self, block, info, values=None):

        if values is None:
            values = block.values

        # convert this column to i8 in UTC, and save the tz
        values = values.asi8.reshape(block.shape)

        # store a converted timezone
        self.tz = _get_tz(block.values.tz)
        self.update_info(info)

        self.kind = 'datetime64'
        self.typ = self.get_atom_datetime64(block)
        self.set_data(values, 'datetime64')

    def get_atom_timedelta64(self, block):
        # timedeltas are stored as int64 nanoseconds
        return _tables().Int64Col(shape=block.shape[0])

    def set_atom_timedelta64(self, block, values=None):
        self.kind = 'timedelta64'
        self.typ = self.get_atom_timedelta64(block)
        if values is None:
            values = block.values.view('i8')
        self.set_data(values, 'timedelta64')

    @property
    def shape(self):
        return getattr(self.data, 'shape', None)

    @property
    def cvalues(self):
        """ return my cython values """
        return self.data

    def validate_attr(self, append):
        """validate that we have the same order as the existing & same dtype"""
        if append:
            existing_fields = getattr(self.attrs, self.kind_attr, None)
            if (existing_fields is not None and
                    existing_fields != list(self.values)):
                raise ValueError("appended items do not match existing items"
                                 " in table!")

            existing_dtype = getattr(self.attrs, self.dtype_attr, None)
            if (existing_dtype is not None and
                    existing_dtype != self.dtype):
                raise ValueError("appended items dtype do not match existing "
                                 "items dtype in table!")

    def convert(self, values, nan_rep, encoding):
        """set the data from this selection (and convert to the correct dtype
        if we can)
        """
        try:
            values = values[self.cname]
        except:
            pass
        self.set_data(values)

        # use the meta if needed
        meta = _ensure_decoded(self.meta)

        # convert to the correct dtype
        if self.dtype is not None:
            dtype = _ensure_decoded(self.dtype)

            # reverse converts
            if dtype == u('datetime64'):

                # recreate with tz if indicated
                self.data = _set_tz(self.data, self.tz, coerce=True)

            elif dtype == u('timedelta64'):
                self.data = np.asarray(self.data, dtype='m8[ns]')
            elif dtype == u('date'):
                # stored either as ordinals or as timestamps; try ordinals
                # first and fall back on ValueError
                try:
                    self.data = np.asarray(
                        [date.fromordinal(v) for v in self.data], dtype=object)
                except ValueError:
                    self.data = np.asarray(
                        [date.fromtimestamp(v) for v in self.data],
                        dtype=object)
            elif dtype == u('datetime'):
                self.data = np.asarray(
                    [datetime.fromtimestamp(v) for v in self.data],
                    dtype=object)

            elif meta == u('category'):

                # we have a categorical
                categories = self.metadata
                self.data = Categorical.from_codes(self.data.ravel(),
                                                   categories=categories,
                                                   ordered=self.ordered)

            else:

                try:
                    self.data = self.data.astype(dtype, copy=False)
                except:
                    self.data = self.data.astype('O', copy=False)

        # convert nans / decode
        if _ensure_decoded(self.kind) == u('string'):
            self.data = _unconvert_string_array(
                self.data, nan_rep=nan_rep, encoding=encoding)

        return self

    def get_attr(self):
        """ get the data for this column """
        self.values = getattr(self.attrs, self.kind_attr, None)
        self.dtype = getattr(self.attrs, self.dtype_attr, None)
        self.meta = getattr(self.attrs, self.meta_attr, None)
        self.set_kind()

    def set_attr(self):
        """ set the data for this column """
        setattr(self.attrs, self.kind_attr, self.values)
        setattr(self.attrs, self.meta_attr, self.meta)
        if self.dtype is not None:
            setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
    """ represent a data column that can be indexed """
    is_data_indexable = True

    def validate_names(self):
        """Require object-dtype labels; other dtypes cannot be data columns."""
        labels = Index(self.values)
        if not labels.is_object():
            raise ValueError("cannot have non-object label DataIndexableCol")

    def get_atom_string(self, block, itemsize):
        # scalar atom: a single value per row (no block shape)
        return _tables().StringCol(itemsize=itemsize)

    def get_atom_data(self, block, kind=None):
        coltype = self.get_atom_coltype(kind=kind)
        return coltype()

    def get_atom_datetime64(self, block):
        return _tables().Int64Col()

    def get_atom_timedelta64(self, block):
        return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
    """ represent a generic pytables data column """

    def get_attr(self):
        # generic columns carry no stored attributes to read back
        pass
class Fixed(StringMixin):
    """ represent an object in my store
    facilitate read/write of various types of objects
    this is an abstract base class

    Parameters
    ----------
    parent : my parent HDFStore
    group : the group node where the table resides
    """
    pandas_kind = None
    obj_type = None
    ndim = None
    is_table = False

    def __init__(self, parent, group, encoding=None, **kwargs):
        self.parent = parent
        self.group = group
        self.encoding = _ensure_encoding(encoding)
        self.set_version()

    @property
    def is_old_version(self):
        # stores written by pandas < 0.10.1 need legacy handling
        return (self.version[0] <= 0 and self.version[1] <= 10 and
                self.version[2] < 1)

    def set_version(self):
        """ compute and set our version """
        version = _ensure_decoded(
            getattr(self.group._v_attrs, 'pandas_version', None))
        try:
            self.version = tuple([int(x) for x in version.split('.')])
            if len(self.version) == 2:
                self.version = self.version + (0,)
        except Exception:
            # missing or unparseable version attribute: assume a very old
            # store.  NOTE: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.version = (0, 0, 0)

    @property
    def pandas_type(self):
        return _ensure_decoded(getattr(self.group._v_attrs,
                                       'pandas_type', None))

    @property
    def format_type(self):
        return 'fixed'

    def __unicode__(self):
        """ return a pretty representation of myself """
        self.infer_axes()
        s = self.shape
        if s is not None:
            if isinstance(s, (list, tuple)):
                s = "[%s]" % ','.join([pprint_thing(x) for x in s])
            return "%-12.12s (shape->%s)" % (self.pandas_type, s)
        return self.pandas_type

    def set_object_info(self):
        """ set my pandas type & version """
        self.attrs.pandas_type = str(self.pandas_kind)
        self.attrs.pandas_version = str(_version)
        self.set_version()

    def copy(self):
        # shallow copy is sufficient: parent/group are shared references
        new_self = copy.copy(self)
        return new_self

    @property
    def storage_obj_type(self):
        return self.obj_type

    @property
    def shape(self):
        return self.nrows

    @property
    def pathname(self):
        return self.group._v_pathname

    @property
    def _handle(self):
        return self.parent._handle

    @property
    def _filters(self):
        return self.parent._filters

    @property
    def _complevel(self):
        return self.parent._complevel

    @property
    def _fletcher32(self):
        return self.parent._fletcher32

    @property
    def _complib(self):
        return self.parent._complib

    @property
    def attrs(self):
        return self.group._v_attrs

    def set_attrs(self):
        """ set our object attributes """
        pass

    def get_attrs(self):
        """ get our object attributes """
        pass

    @property
    def storable(self):
        """ return my storable """
        return self.group

    @property
    def is_exists(self):
        return False

    @property
    def nrows(self):
        return getattr(self.storable, 'nrows', None)

    def validate(self, other):
        """ validate against an existing storable """
        if other is None:
            return
        return True

    def validate_version(self, where=None):
        """ are we trying to operate on an old version? """
        return True

    def infer_axes(self):
        """ infer the axes of my storer
        return a boolean indicating if we have a valid storer or not """
        s = self.storable
        if s is None:
            return False
        self.get_attrs()
        return True

    def read(self, **kwargs):
        raise NotImplementedError(
            "cannot read on an abstract storer: subclasses should implement")

    def write(self, **kwargs):
        # typo fix: was "sublcasses"
        raise NotImplementedError(
            "cannot write on an abstract storer: subclasses should implement")

    def delete(self, where=None, start=None, stop=None, **kwargs):
        """ support fully deleting the node in its entirety (only) - where
        specification must be None """
        if where is None and start is None and stop is None:
            self._handle.remove_node(self.group, recursive=True)
            return None

        raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
    """ a generified fixed version """
    _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
    _reverse_index_map = dict([(v, k)
                               for k, v in compat.iteritems(_index_type_map)])
    # names of node attributes restored by get_attrs
    attributes = []

    # indexer helpers
    def _class_to_alias(self, cls):
        return self._index_type_map.get(cls, '')

    def _alias_to_class(self, alias):
        if isinstance(alias, type):  # pragma: no cover
            # compat: for a short period of time master stored types
            return alias
        return self._reverse_index_map.get(alias, Index)

    def _get_index_factory(self, klass):
        # DatetimeIndex needs a special constructor accepting freq/tz
        if klass == DatetimeIndex:
            def f(values, freq=None, tz=None):
                return DatetimeIndex._simple_new(values, None, freq=freq,
                                                 tz=tz)
            return f
        return klass

    def validate_read(self, kwargs):
        # fixed-format stores can only be read in their entirety, so any
        # column/where selector is an error
        if kwargs.get('columns') is not None:
            raise TypeError("cannot pass a column specification when reading "
                            "a Fixed format store. this store must be "
                            "selected in its entirety")
        if kwargs.get('where') is not None:
            raise TypeError("cannot pass a where specification when reading "
                            "from a Fixed format store. this store must be "
                            "selected in its entirety")

    @property
    def is_exists(self):
        return True

    def set_attrs(self):
        """ set our object attributes """
        self.attrs.encoding = self.encoding

    def get_attrs(self):
        """ retrieve our attributes """
        self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
        for n in self.attributes:
            setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))

    def write(self, obj, **kwargs):
        self.set_attrs()

    def read_array(self, key):
        """ read an array for the specified node (off of group) """
        import tables
        node = getattr(self.group, key)
        data = node[:]
        attrs = node._v_attrs

        transposed = getattr(attrs, 'transposed', False)

        if isinstance(node, tables.VLArray):
            # variable-length arrays store the whole object as element 0
            ret = data[0]
        else:
            dtype = getattr(attrs, 'value_type', None)
            shape = getattr(attrs, 'shape', None)

            if shape is not None:
                # length 0 axis
                ret = np.empty(shape, dtype=dtype)
            else:
                ret = data

            if dtype == u('datetime64'):
                # reconstruct a timezone if indicated
                ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
            elif dtype == u('timedelta64'):
                ret = np.asarray(ret, dtype='m8[ns]')

        if transposed:
            return ret.T
        else:
            return ret

    def read_index(self, key):
        # dispatch on how the index was written (see write_index)
        variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))

        if variety == u('multi'):
            return self.read_multi_index(key)
        elif variety == u('block'):
            return self.read_block_index(key)
        elif variety == u('sparseint'):
            return self.read_sparse_intindex(key)
        elif variety == u('regular'):
            _, index = self.read_index_node(getattr(self.group, key))
            return index
        else:  # pragma: no cover
            raise TypeError('unrecognized index variety: %s' % variety)

    def write_index(self, key, index):
        # record the index "variety" so read_index can dispatch correctly
        if isinstance(index, MultiIndex):
            setattr(self.attrs, '%s_variety' % key, 'multi')
            self.write_multi_index(key, index)
        elif isinstance(index, BlockIndex):
            setattr(self.attrs, '%s_variety' % key, 'block')
            self.write_block_index(key, index)
        elif isinstance(index, IntIndex):
            setattr(self.attrs, '%s_variety' % key, 'sparseint')
            self.write_sparse_intindex(key, index)
        else:
            setattr(self.attrs, '%s_variety' % key, 'regular')
            converted = _convert_index(index, self.encoding,
                                       self.format_type).set_name('index')
            self.write_array(key, converted.values)
            node = getattr(self.group, key)
            node._v_attrs.kind = converted.kind
            node._v_attrs.name = index.name

            if isinstance(index, (DatetimeIndex, PeriodIndex)):
                node._v_attrs.index_class = self._class_to_alias(type(index))

            if hasattr(index, 'freq'):
                node._v_attrs.freq = index.freq

            if hasattr(index, 'tz') and index.tz is not None:
                node._v_attrs.tz = _get_tz(index.tz)

    def write_block_index(self, key, index):
        self.write_array('%s_blocs' % key, index.blocs)
        self.write_array('%s_blengths' % key, index.blengths)
        setattr(self.attrs, '%s_length' % key, index.length)

    def read_block_index(self, key):
        length = getattr(self.attrs, '%s_length' % key)
        blocs = self.read_array('%s_blocs' % key)
        blengths = self.read_array('%s_blengths' % key)
        return BlockIndex(length, blocs, blengths)

    def write_sparse_intindex(self, key, index):
        self.write_array('%s_indices' % key, index.indices)
        setattr(self.attrs, '%s_length' % key, index.length)

    def read_sparse_intindex(self, key):
        length = getattr(self.attrs, '%s_length' % key)
        indices = self.read_array('%s_indices' % key)
        return IntIndex(length, indices)

    def write_multi_index(self, key, index):
        # each level/labels pair is written as its own node
        setattr(self.attrs, '%s_nlevels' % key, index.nlevels)

        for i, (lev, lab, name) in enumerate(zip(index.levels,
                                                 index.labels,
                                                 index.names)):
            # write the level
            level_key = '%s_level%d' % (key, i)
            conv_level = _convert_index(lev, self.encoding,
                                        self.format_type).set_name(level_key)
            self.write_array(level_key, conv_level.values)
            node = getattr(self.group, level_key)
            node._v_attrs.kind = conv_level.kind
            node._v_attrs.name = name

            # write the name
            setattr(node._v_attrs, '%s_name%d' % (key, i), name)

            # write the labels
            label_key = '%s_label%d' % (key, i)
            self.write_array(label_key, lab)

    def read_multi_index(self, key):
        nlevels = getattr(self.attrs, '%s_nlevels' % key)

        levels = []
        labels = []
        names = []
        for i in range(nlevels):
            level_key = '%s_level%d' % (key, i)
            name, lev = self.read_index_node(getattr(self.group, level_key))
            levels.append(lev)
            names.append(name)

            label_key = '%s_label%d' % (key, i)
            lab = self.read_array(label_key)
            labels.append(lab)

        return MultiIndex(levels=levels, labels=labels, names=names,
                          verify_integrity=True)

    def read_index_node(self, node):
        data = node[:]
        # If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
        if ('shape' in node._v_attrs and
                self._is_empty_array(getattr(node._v_attrs, 'shape'))):
            data = np.empty(getattr(node._v_attrs, 'shape'),
                            dtype=getattr(node._v_attrs, 'value_type'))
        kind = _ensure_decoded(node._v_attrs.kind)
        name = None

        if 'name' in node._v_attrs:
            name = node._v_attrs.name

        index_class = self._alias_to_class(getattr(node._v_attrs,
                                                   'index_class', ''))
        factory = self._get_index_factory(index_class)

        kwargs = {}
        if u('freq') in node._v_attrs:
            kwargs['freq'] = node._v_attrs['freq']

        if u('tz') in node._v_attrs:
            kwargs['tz'] = node._v_attrs['tz']

        if kind in (u('date'), u('datetime')):
            index = factory(
                _unconvert_index(data, kind, encoding=self.encoding),
                dtype=object, **kwargs)
        else:
            index = factory(
                _unconvert_index(data, kind, encoding=self.encoding), **kwargs)

        index.name = name

        return name, index

    def write_array_empty(self, key, value):
        """ write a 0-len array """

        # ugly hack for length 0 axes
        arr = np.empty((1,) * value.ndim)
        self._handle.create_array(self.group, key, arr)
        # remember the real dtype/shape so read_array can reconstruct it
        getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
        getattr(self.group, key)._v_attrs.shape = value.shape

    def _is_empty_array(self, shape):
        """Returns true if any axis is zero length."""
        return any(x == 0 for x in shape)

    def write_array(self, key, value, items=None):
        if key in self.group:
            self._handle.remove_node(self.group, key)

        # Transform needed to interface with pytables row/col notation
        empty_array = self._is_empty_array(value.shape)
        transposed = False

        if com.is_categorical_dtype(value):
            raise NotImplementedError("cannot store a category dtype")

        if not empty_array:
            value = value.T
            transposed = True

        if self._filters is not None:
            atom = None
            try:
                # get the atom for this datatype
                atom = _tables().Atom.from_dtype(value.dtype)
            except ValueError:
                pass

            if atom is not None:
                # create an empty chunked array and fill it from value
                if not empty_array:
                    ca = self._handle.create_carray(self.group, key, atom,
                                                    value.shape,
                                                    filters=self._filters)
                    ca[:] = value
                    getattr(self.group, key)._v_attrs.transposed = transposed

                else:
                    self.write_array_empty(key, value)

                return

        if value.dtype.type == np.object_:

            # infer the type, warn if we have a non-string type here (for
            # performance)
            inferred_type = lib.infer_dtype(value.ravel())
            if empty_array:
                pass
            elif inferred_type == 'string':
                pass
            else:
                try:
                    items = list(items)
                except:
                    pass
                ws = performance_doc % (inferred_type, key, items)
                warnings.warn(ws, PerformanceWarning, stacklevel=7)

            vlarr = self._handle.create_vlarray(self.group, key,
                                                _tables().ObjectAtom())
            vlarr.append(value)
        else:
            if empty_array:
                self.write_array_empty(key, value)
            else:
                if com.is_datetime64_dtype(value.dtype):
                    self._handle.create_array(self.group, key, value.view('i8'))
                    getattr(
                        self.group, key)._v_attrs.value_type = 'datetime64'
                elif com.is_datetime64tz_dtype(value.dtype):
                    # store as UTC
                    # with a zone
                    self._handle.create_array(self.group, key,
                                              value.asi8)

                    node = getattr(self.group, key)
                    node._v_attrs.tz = _get_tz(value.tz)
                    node._v_attrs.value_type = 'datetime64'
                elif com.is_timedelta64_dtype(value.dtype):
                    self._handle.create_array(self.group, key, value.view('i8'))
                    getattr(
                        self.group, key)._v_attrs.value_type = 'timedelta64'
                else:
                    self._handle.create_array(self.group, key, value)

        getattr(self.group, key)._v_attrs.transposed = transposed
class LegacyFixed(GenericFixed):

    def read_index_legacy(self, key):
        """Read a legacy (pre-table) index stored under *key*."""
        node = getattr(self.group, key)
        raw = node[:]
        stored_kind = node._v_attrs.kind
        return _unconvert_index_legacy(raw, stored_kind,
                                       encoding=self.encoding)
class LegacySeriesFixed(LegacyFixed):

    def read(self, **kwargs):
        """Reconstruct a Series from a legacy fixed-format store."""
        self.validate_read(kwargs)
        idx = self.read_index_legacy('index')
        vals = self.read_array('values')
        return Series(vals, index=idx)
class LegacyFrameFixed(LegacyFixed):

    def read(self, **kwargs):
        """Reconstruct a DataFrame from a legacy fixed-format store."""
        self.validate_read(kwargs)
        idx = self.read_index_legacy('index')
        cols = self.read_index_legacy('columns')
        vals = self.read_array('values')
        return DataFrame(vals, index=idx, columns=cols)
class SeriesFixed(GenericFixed):
    pandas_kind = u('series')
    attributes = ['name']

    @property
    def shape(self):
        """Return (nrows,) of the stored values, or None if unavailable."""
        try:
            return len(getattr(self.group, 'values')),
        except (TypeError, AttributeError):
            # was a bare ``except:`` which also masked KeyboardInterrupt;
            # AttributeError: no 'values' node; TypeError: node has no len()
            return None

    def read(self, **kwargs):
        """Read back the stored Series in its entirety."""
        self.validate_read(kwargs)
        index = self.read_index('index')
        values = self.read_array('values')
        return Series(values, index=index, name=self.name)

    def write(self, obj, **kwargs):
        """Write *obj* (a Series): index, values and name."""
        super(SeriesFixed, self).write(obj, **kwargs)
        self.write_index('index', obj.index)
        self.write_array('values', obj.values)
        self.attrs.name = obj.name
class SparseSeriesFixed(GenericFixed):
    pandas_kind = u('sparse_series')
    attributes = ['name', 'fill_value', 'kind']

    def read(self, **kwargs):
        """Reconstruct the stored SparseSeries."""
        self.validate_read(kwargs)
        index = self.read_index('index')
        sp_values = self.read_array('sp_values')
        sp_index = self.read_index('sp_index')
        sparse_kind = self.kind or u('block')
        return SparseSeries(sp_values, index=index, sparse_index=sp_index,
                            kind=sparse_kind,
                            fill_value=self.fill_value,
                            name=self.name)

    def write(self, obj, **kwargs):
        """Write the sparse series: both indexes, values and attributes."""
        super(SparseSeriesFixed, self).write(obj, **kwargs)
        self.write_index('index', obj.index)
        self.write_index('sp_index', obj.sp_index)
        self.write_array('sp_values', obj.sp_values)
        self.attrs.name = obj.name
        self.attrs.fill_value = obj.fill_value
        self.attrs.kind = obj.kind
class SparseFrameFixed(GenericFixed):
    pandas_kind = u('sparse_frame')
    attributes = ['default_kind', 'default_fill_value']

    def read(self, **kwargs):
        """Reassemble the frame from its per-column sparse series nodes."""
        self.validate_read(kwargs)
        columns = self.read_index('columns')
        sdict = {}
        for col in columns:
            child_key = 'sparse_series_%s' % col
            reader = SparseSeriesFixed(self.parent,
                                       getattr(self.group, child_key))
            reader.infer_axes()
            sdict[col] = reader.read()
        return SparseDataFrame(sdict, columns=columns,
                               default_kind=self.default_kind,
                               default_fill_value=self.default_fill_value)

    def write(self, obj, **kwargs):
        """ write it as a collection of individual sparse series """
        super(SparseFrameFixed, self).write(obj, **kwargs)
        for name, ss in compat.iteritems(obj):
            child_key = 'sparse_series_%s' % name
            # reuse the existing child group if present, otherwise create it
            if child_key not in self.group._v_children:
                node = self._handle.create_group(self.group, child_key)
            else:
                node = getattr(self.group, child_key)
            writer = SparseSeriesFixed(self.parent, node)
            writer.write(ss)
        self.attrs.default_fill_value = obj.default_fill_value
        self.attrs.default_kind = obj.default_kind
        self.write_index('columns', obj.columns)
class SparsePanelFixed(GenericFixed):
    pandas_kind = u('sparse_panel')
    attributes = ['default_kind', 'default_fill_value']

    def read(self, **kwargs):
        """Reassemble the panel from its per-item sparse frame nodes."""
        self.validate_read(kwargs)
        items = self.read_index('items')
        sdict = {}
        for item in items:
            child_key = 'sparse_frame_%s' % item
            reader = SparseFrameFixed(self.parent,
                                      getattr(self.group, child_key))
            reader.infer_axes()
            sdict[item] = reader.read()
        return SparsePanel(sdict, items=items, default_kind=self.default_kind,
                           default_fill_value=self.default_fill_value)

    def write(self, obj, **kwargs):
        """Write the panel as a collection of individual sparse frames."""
        super(SparsePanelFixed, self).write(obj, **kwargs)
        self.attrs.default_fill_value = obj.default_fill_value
        self.attrs.default_kind = obj.default_kind
        self.write_index('items', obj.items)

        for name, sdf in compat.iteritems(obj):
            child_key = 'sparse_frame_%s' % name
            # reuse the existing child group if present, otherwise create it
            if child_key not in self.group._v_children:
                node = self._handle.create_group(self.group, child_key)
            else:
                node = getattr(self.group, child_key)
            writer = SparseFrameFixed(self.parent, node)
            writer.write(sdf)
class BlockManagerFixed(GenericFixed):
    attributes = ['ndim', 'nblocks']
    is_shape_reversed = False

    @property
    def shape(self):
        """Compute the stored object's shape from the block nodes,
        or return None if the storer is not fully initialized."""
        try:
            ndim = self.ndim

            # items
            items = 0
            for i in range(self.nblocks):
                node = getattr(self.group, 'block%d_items' % i)
                shape = getattr(node, 'shape', None)
                if shape is not None:
                    items += shape[0]

            # data shape
            node = getattr(self.group, 'block0_values')
            shape = getattr(node, 'shape', None)
            if shape is not None:
                shape = list(shape[0:(ndim - 1)])
            else:
                shape = []

            shape.append(items)

            # hacky - this works for frames, but is reversed for panels
            if self.is_shape_reversed:
                shape = shape[::-1]

            return shape
        except (TypeError, AttributeError):
            # was a bare ``except:`` which also masked KeyboardInterrupt;
            # AttributeError: missing block nodes; TypeError: ndim/nblocks
            # not yet set (None)
            return None

    def read(self, **kwargs):
        """Read the whole stored object back as self.obj_type."""
        self.validate_read(kwargs)

        axes = []
        for i in range(self.ndim):
            ax = self.read_index('axis%d' % i)
            axes.append(ax)

        items = axes[0]
        blocks = []
        for i in range(self.nblocks):
            blk_items = self.read_index('block%d_items' % i)
            values = self.read_array('block%d_values' % i)
            blk = make_block(values,
                             placement=items.get_indexer(blk_items))
            blocks.append(blk)

        return self.obj_type(BlockManager(blocks, axes))

    def write(self, obj, **kwargs):
        """Write *obj* block-by-block; requires a unique columns index."""
        super(BlockManagerFixed, self).write(obj, **kwargs)
        data = obj._data
        if not data.is_consolidated():
            data = data.consolidate()

        self.attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
            if i == 0:
                if not ax.is_unique:
                    raise ValueError("Columns index has to be unique for fixed format")
            self.write_index('axis%d' % i, ax)

        # Supporting mixed-type DataFrame objects...nontrivial
        self.attrs.nblocks = len(data.blocks)
        for i, blk in enumerate(data.blocks):
            # I have no idea why, but writing values before items fixed #2299
            blk_items = data.items.take(blk.mgr_locs)
            self.write_array('block%d_values' % i, blk.values, items=blk_items)
            self.write_index('block%d_items' % i, blk_items)
class FrameFixed(BlockManagerFixed):
    # fixed-format storer for DataFrame objects
    pandas_kind = u('frame')
    obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
    # fixed-format storer for Panel objects; panel shapes are stored
    # reversed relative to frames (see BlockManagerFixed.shape)
    pandas_kind = u('wide')
    obj_type = Panel
    is_shape_reversed = True

    def write(self, obj, **kwargs):
        # panels must be consolidated before block-wise writing
        obj._consolidate_inplace()
        return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
    def __init__(self, *args, **kwargs):
        super(Table, self).__init__(*args, **kwargs)
        # per-table bookkeeping, (re)populated when axes are inferred or
        # created; empty until then
        self.index_axes = []
        self.non_index_axes = []
        self.values_axes = []
        self.data_columns = []
        self.metadata = []
        self.info = dict()
        self.nan_rep = None
        self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
    @property
    def format_type(self):
        # as opposed to 'fixed' (see Fixed.format_type)
        return 'table'
    def __unicode__(self):
        """ return a pretty representation of myself """
        self.infer_axes()
        dc = ",dc->[%s]" % ','.join(
            self.data_columns) if len(self.data_columns) else ''

        ver = ''
        if self.is_old_version:
            ver = "[%s]" % '.'.join([str(x) for x in self.version])

        return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
            self.pandas_type, ver, self.table_type_short, self.nrows,
            self.ncols, ','.join([a.name for a in self.index_axes]), dc
        )
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
    @property
    def is_multi_index(self):
        """the levels attribute is 1 or a list in the case of a multi-index"""
        return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [ c.name for c in self.values_axes if c.metadata is not None ]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
    @property
    def nrows_expected(self):
        """ based on our axes, compute the expected nrows """
        return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
    """True once the 'table' node has been created under our group."""
    return u('table') in self.group
@property
def storable(self):
    # the underlying PyTables 'table' node (None if not yet written)
    return getattr(self.group, 'table', None)
@property
def table(self):
    """Return the table node (alias for ``storable``)."""
    return self.storable
@property
def dtype(self):
    # the structured dtype of the underlying PyTables table
    return self.table.dtype
@property
def description(self):
    # the PyTables column description of the underlying table
    return self.table.description
@property
def axes(self):
    """Iterate over all axes: index axes first, then values axes.

    NOTE: returns a one-shot ``itertools.chain`` iterator, not a list.
    """
    return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
    """The total number of columns across all the values axes."""
    return sum([len(a.values) for a in self.values_axes])
@property
def is_transposed(self):
    # base tables are never stored transposed; subclasses override
    return False
@property
def data_orientation(self):
    """Return a tuple of my permutated axes, non-indexable at the front."""
    return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
                                 [int(a.axis) for a in self.index_axes]))
def queryables(self):
    """Return a dict mapping column name -> column object for every
    column usable in a ``where`` query (non-queryable axis names map
    to None)."""
    result = {}
    # the index axes are always queryable
    for axis_col in self.index_axes:
        result[axis_col.cname] = axis_col
    # non-index axes are addressable by their pandas axis name only
    for axis, values in self.non_index_axes:
        result[self.storage_obj_type._AXIS_NAMES[axis]] = None
    # of the values axes, only declared data columns are queryable
    data_cols = set(self.data_columns)
    for vcol in self.values_axes:
        if vcol.name in data_cols:
            result[vcol.cname] = vcol
    return result
def index_cols(self):
    """Return the index columns as a list of (axis, cname) pairs."""
    pairs = []
    for col in self.index_axes:
        pairs.append((col.axis, col.cname))
    return pairs
def values_cols(self):
    """Return the cnames of the values columns as a list."""
    names = []
    for col in self.values_axes:
        names.append(col.cname)
    return names
def _get_metadata_path(self, key):
    """Return the node pathname where metadata for ``key`` lives."""
    base = self.group._v_pathname
    return "{group}/meta/{key}/meta".format(group=base, key=key)
def write_metadata(self, key, values):
    """
    Write a metadata array under this table's meta node as a Series.

    Parameters
    ----------
    key : string
    values : ndarray

    NOTE(review): the Series is written with ``format='table'`` here,
    even though it is only read back whole — confirm 'table' (not
    'fixed') is intentional.
    """
    values = Series(values)
    self.parent.put(self._get_metadata_path(key), values, format='table',
                    encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
    """Return the stored metadata for ``key``, or None when absent."""
    meta_node = getattr(self.group, 'meta', None)
    if getattr(meta_node, key, None) is None:
        return None
    return self.parent.select(self._get_metadata_path(key))
def set_info(self):
    """Persist the table index info dict onto the node attributes."""
    self.attrs.info = self.info
def set_attrs(self):
    """Persist our table type & indexing metadata as node attributes."""
    self.attrs.table_type = str(self.table_type)
    self.attrs.index_cols = self.index_cols()
    self.attrs.values_cols = self.values_cols()
    self.attrs.non_index_axes = self.non_index_axes
    self.attrs.data_columns = self.data_columns
    self.attrs.nan_rep = self.nan_rep
    self.attrs.encoding = self.encoding
    self.attrs.levels = self.levels
    self.attrs.metadata = self.metadata
    self.set_info()
def get_attrs(self):
    """Retrieve our attributes from the node, defaulting missing ones,
    and infer the index/values axes from the stored indexables."""
    self.non_index_axes = getattr(
        self.attrs, 'non_index_axes', None) or []
    self.data_columns = getattr(
        self.attrs, 'data_columns', None) or []
    self.info = getattr(
        self.attrs, 'info', None) or dict()
    self.nan_rep = getattr(self.attrs, 'nan_rep', None)
    self.encoding = _ensure_encoding(
        getattr(self.attrs, 'encoding', None))
    self.levels = getattr(
        self.attrs, 'levels', None) or []
    # split the stored indexables into index vs values axes
    self.index_axes = [
        a.infer(self) for a in self.indexables if a.is_an_indexable
    ]
    self.values_axes = [
        a.infer(self) for a in self.indexables if not a.is_an_indexable
    ]
    self.metadata = getattr(
        self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
    """Warn when selecting (``where`` given) from a table written by a
    pandas version older than 0.10.1."""
    if where is not None:
        if (self.version[0] <= 0 and self.version[1] <= 10 and
                self.version[2] < 1):
            ws = incompatibility_doc % '.'.join(
                [str(x) for x in self.version])
            warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
    """Validate that every key of a ``min_itemsize`` dict names a
    queryable axis or data column (needs data_columns defined)."""
    # a scalar (or missing) min_itemsize applies generally: nothing to do
    if min_itemsize is None or not isinstance(min_itemsize, dict):
        return

    q = self.queryables()
    for key in min_itemsize:
        # the magic key 'values' applies to all value columns
        if key == 'values':
            continue
        if key not in q:
            raise ValueError(
                "min_itemsize has the key [%s] which is not an axis or "
                "data_column" % key)
@property
def indexables(self):
    """Create (and cache) the column descriptors from the stored
    ``index_cols``/``values_cols`` attributes."""
    if self._indexables is None:

        self._indexables = []

        # index columns
        self._indexables.extend([
            IndexCol(name=name, axis=axis, pos=i)
            for i, (axis, name) in enumerate(self.attrs.index_cols)
        ])

        # values columns: positions continue after the index columns
        dc = set(self.data_columns)
        base_pos = len(self._indexables)

        def f(i, c):
            # a declared data column gets the indexable variant
            klass = DataCol
            if c in dc:
                klass = DataIndexableCol
            return klass.create_for_block(i=i, name=c, pos=base_pos + i,
                                          version=self.version)

        self._indexables.extend(
            [f(i, c) for i, c in enumerate(self.attrs.values_cols)])

    return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
    """
    Create a pytables index on the specified columns
      note: cannot index Time64Col() or ComplexCol currently;
      PyTables must be >= 3.0

    Parameters
    ----------
    columns : False (don't create an index), True (create all columns
        index), None or list_like (the indexers to index)
    optlevel: optimization level (defaults to 6)
    kind    : kind of index (defaults to 'medium')

    Exceptions
    ----------
    raises if the node is not a table
    """

    if not self.infer_axes():
        return
    if columns is False:
        return

    # index all indexables and data_columns
    if columns is None or columns is True:
        columns = [a.cname for a in self.axes if a.is_data_indexable]
    if not isinstance(columns, (tuple, list)):
        columns = [columns]

    kw = dict()
    if optlevel is not None:
        kw['optlevel'] = optlevel
    if kind is not None:
        kw['kind'] = kind

    table = self.table
    for c in columns:
        v = getattr(table.cols, c, None)
        if v is not None:

            # remove the index if the kind/optlevel have changed,
            # otherwise keep the existing settings for the re-index
            if v.is_indexed:
                index = v.index
                cur_optlevel = index.optlevel
                cur_kind = index.kind

                if kind is not None and cur_kind != kind:
                    v.remove_index()
                else:
                    kw['kind'] = cur_kind

                if optlevel is not None and cur_optlevel != optlevel:
                    v.remove_index()
                else:
                    kw['optlevel'] = cur_optlevel

            # create the index (only if missing or just removed above)
            if not v.is_indexed:
                if v.type.startswith('complex'):
                    raise TypeError('Columns containing complex values can be stored but cannot'
                                    ' be indexed when using table format. Either use fixed '
                                    'format, set index=False, or do not include the columns '
                                    'containing complex values to data_columns when '
                                    'initializing the table.')
                v.create_index(**kw)
def read_axes(self, where, **kwargs):
    """Create and populate the axes sniffed from the table; return a
    boolean indicating success."""

    # validate the version
    self.validate_version(where)

    # infer the data kind; bail if there is no stored table
    if not self.infer_axes():
        return False

    # create the selection and pull the raw rows
    self.selection = Selection(self, where=where, **kwargs)
    values = self.selection.select()

    # convert the raw data into each axis, in place
    for a in self.axes:
        a.set_info(self.info)
        a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)

    return True
def get_object(self, obj):
    """Return the object in storage orientation (identity here;
    transposing subclasses override)."""
    return obj
def validate_data_columns(self, data_columns, min_itemsize):
    """Normalize the requested ``data_columns`` (None/True/list) into an
    ordered list of labels valid on the info axis."""

    # nothing can be a data column without a non-index axis
    if not len(self.non_index_axes):
        return []

    axis, axis_labels = self.non_index_axes[0]
    info = self.info.get(axis, dict())
    if info.get('type') == 'MultiIndex' and data_columns:
        raise ValueError("cannot use a multi-index on axis [{0}] with "
                         "data_columns {1}".format(axis, data_columns))

    # True means use every label on the axis; None means none requested
    if data_columns is True:
        data_columns = axis_labels
    elif data_columns is None:
        data_columns = []

    # any key of a min_itemsize dict (except the generic 'values')
    # must be stored as a data column as well
    if isinstance(min_itemsize, dict):
        already = set(data_columns)
        for key in min_itemsize:
            if key != 'values' and key not in already:
                data_columns.append(key)

    # keep request order, restricted to labels present on the axis
    return [c for c in data_columns if c in axis_labels]
def create_axes(self, axes, obj, validate=True, nan_rep=None,
                data_columns=None, min_itemsize=None, **kwargs):
    """Create and return the axes.

    Legacy tables create an indexable column, indexable index and
    non-indexable fields.

    Parameters
    ----------
    axes : a list of the axes in order to create (names or numbers of
        the axes)
    obj : the object to create axes on
    validate : validate the obj against an existing object already
        written
    min_itemsize : a dict of the min size for a column in bytes
    nan_rep : a value to use for string column nan_rep
    encoding : the encoding for string values (via **kwargs)
    data_columns : a list of columns that we want to create separately
        to allow indexing (or True will force all columns)
    """

    # set the default axes if needed
    if axes is None:
        try:
            axes = _AXES_MAP[type(obj)]
        except:
            raise TypeError("cannot properly create the storer for: "
                            "[group->%s,value->%s]"
                            % (self.group._v_name, type(obj)))

    # map axes to numbers
    axes = [obj._get_axis_number(a) for a in axes]

    # do we have an existing table (if so, use its axes & data_columns)
    if self.infer_axes():
        existing_table = self.copy()
        existing_table.infer_axes()
        axes = [a.axis for a in existing_table.index_axes]
        data_columns = existing_table.data_columns
        nan_rep = existing_table.nan_rep
        self.encoding = existing_table.encoding
        self.info = copy.copy(existing_table.info)
    else:
        existing_table = None

    # currently support on ndim-1 axes
    if len(axes) != self.ndim - 1:
        raise ValueError(
            "currently only support ndim-1 indexers in an AppendableTable")

    # create according to the new data
    self.non_index_axes = []
    self.data_columns = []

    # nan_representation
    if nan_rep is None:
        nan_rep = 'nan'

    self.nan_rep = nan_rep

    # create axes to index and non_index
    index_axes_map = dict()
    for i, a in enumerate(obj.axes):

        if i in axes:
            name = obj._AXIS_NAMES[i]
            index_axes_map[i] = _convert_index(
                a, self.encoding, self.format_type
            ).set_name(name).set_axis(i)
        else:

            # we might be able to change the axes on the appending data if
            # necessary
            append_axis = list(a)
            if existing_table is not None:
                indexer = len(self.non_index_axes)
                exist_axis = existing_table.non_index_axes[indexer][1]
                if append_axis != exist_axis:

                    # ahah! -> reindex (only when it's a pure reordering)
                    if sorted(append_axis) == sorted(exist_axis):
                        append_axis = exist_axis

            # the non_index_axes info
            info = _get_info(self.info, i)
            info['names'] = list(a.names)
            info['type'] = a.__class__.__name__

            self.non_index_axes.append((i, append_axis))

    # set axis positions (based on the axes)
    self.index_axes = [
        index_axes_map[a].set_pos(j).update_info(self.info)
        for j, a in enumerate(axes)
    ]
    j = len(self.index_axes)

    # check for column conflicts
    if validate:
        for a in self.axes:
            a.maybe_set_size(min_itemsize=min_itemsize)

    # reindex by our non_index_axes & compute data_columns
    for a in self.non_index_axes:
        obj = _reindex_axis(obj, a[0], a[1])

    def get_blk_items(mgr, blocks):
        # the items (column labels) owned by each block
        return [mgr.items.take(blk.mgr_locs) for blk in blocks]

    # figure out data_columns and get out blocks
    block_obj = self.get_object(obj).consolidate()
    blocks = block_obj._data.blocks
    blk_items = get_blk_items(block_obj._data, blocks)
    if len(self.non_index_axes):
        axis, axis_labels = self.non_index_axes[0]
        data_columns = self.validate_data_columns(
            data_columns, min_itemsize)
        if len(data_columns):
            # non-data columns first (as one set of blocks), then one
            # single-column block per data column
            mgr = block_obj.reindex_axis(
                Index(axis_labels).difference(Index(data_columns)),
                axis=axis
            )._data

            blocks = list(mgr.blocks)
            blk_items = get_blk_items(mgr, blocks)
            for c in data_columns:
                mgr = block_obj.reindex_axis([c], axis=axis)._data
                blocks.extend(mgr.blocks)
                blk_items.extend(get_blk_items(mgr, mgr.blocks))

    # reorder the blocks in the same order as the existing_table if we can
    if existing_table is not None:
        by_items = dict([(tuple(b_items.tolist()), (b, b_items))
                         for b, b_items in zip(blocks, blk_items)])
        new_blocks = []
        new_blk_items = []
        for ea in existing_table.values_axes:
            items = tuple(ea.values)
            try:
                b, b_items = by_items.pop(items)
                new_blocks.append(b)
                new_blk_items.append(b_items)
            except:
                raise ValueError(
                    "cannot match existing table structure for [%s] on "
                    "appending data" % ','.join(com.pprint_thing(item) for
                                                item in items))
        blocks = new_blocks
        blk_items = new_blk_items

    # add my values
    self.values_axes = []
    for i, (b, b_items) in enumerate(zip(blocks, blk_items)):

        # shape of the data column are the indexable axes
        klass = DataCol
        name = None

        # we have a data_column
        if (data_columns and len(b_items) == 1 and
                b_items[0] in data_columns):
            klass = DataIndexableCol
            name = b_items[0]
            self.data_columns.append(name)

        # make sure that we match up the existing columns
        # if we have an existing table
        if existing_table is not None and validate:
            try:
                existing_col = existing_table.values_axes[i]
            except:
                raise ValueError("Incompatible appended table [%s] with "
                                 "existing table [%s]"
                                 % (blocks, existing_table.values_axes))
        else:
            existing_col = None

        try:
            col = klass.create_for_block(
                i=i, name=name, version=self.version)
            col.set_atom(block=b, block_items=b_items,
                         existing_col=existing_col,
                         min_itemsize=min_itemsize,
                         nan_rep=nan_rep,
                         encoding=self.encoding,
                         info=self.info,
                         **kwargs)
            col.set_pos(j)

            self.values_axes.append(col)
        except (NotImplementedError, ValueError, TypeError) as e:
            raise e
        except Exception as detail:
            raise Exception(
                "cannot find the correct atom type -> "
                "[dtype->%s,items->%s] %s"
                % (b.dtype.name, b_items, str(detail))
            )
        j += 1

    # validate our min_itemsize
    self.validate_min_itemsize(min_itemsize)

    # validate our metadata
    self.validate_metadata(existing_table)

    # validate the axes if we have an existing table
    if validate:
        self.validate(existing_table)
def process_axes(self, obj, columns=None):
    """Apply axis reordering, column selection and selection filters to
    the read object; returns the (possibly new) object."""

    # make a copy to avoid side effects
    if columns is not None:
        columns = list(columns)

    # make sure to include levels if we have them
    if columns is not None and self.is_multi_index:
        for n in self.levels:
            if n not in columns:
                columns.insert(0, n)

    # reorder by any non_index_axes & limit to the select columns
    for axis, labels in self.non_index_axes:
        obj = _reindex_axis(obj, axis, labels, columns)

    # apply the selection filters (but keep in the same order)
    if self.selection.filter is not None:
        for field, op, filt in self.selection.filter.format():

            def process_filter(field, filt):

                for axis_name in obj._AXIS_NAMES.values():
                    axis_number = obj._get_axis_number(axis_name)
                    axis_values = obj._get_axis(axis_name)

                    # see if the field is the name of an axis
                    if field == axis_name:

                        # if we have a multi-index, then need to include
                        # the levels
                        if self.is_multi_index:
                            filt = filt.union(Index(self.levels))

                        takers = op(axis_values, filt)
                        return obj.ix._getitem_axis(takers,
                                                    axis=axis_number)

                    # this might be the name of a file IN an axis
                    elif field in axis_values:

                        # we need to filter on this dimension
                        values = _ensure_index(getattr(obj, field).values)
                        filt = _ensure_index(filt)

                        # hack until we support reversed dim flags
                        if isinstance(obj, DataFrame):
                            axis_number = 1 - axis_number
                        takers = op(values, filt)
                        return obj.ix._getitem_axis(takers,
                                                    axis=axis_number)

                raise ValueError(
                    "cannot find the field [%s] for filtering!" % field)

            obj = process_filter(field, filt)

    return obj
def create_description(self, complib=None, complevel=None,
                       fletcher32=False, expectedrows=None):
    """Build the kwargs dict for PyTables ``create_table`` from the
    axes, values and compression settings."""

    # provided expected rows if its passed
    if expectedrows is None:
        expectedrows = max(self.nrows_expected, 10000)

    d = dict(name='table', expectedrows=expectedrows)

    # description from the axes & values
    d['description'] = dict([(a.cname, a.typ) for a in self.axes])

    if complib:
        if complevel is None:
            # fall back to the store-level setting, else maximum
            complevel = self._complevel or 9
        filters = _tables().Filters(
            complevel=complevel, complib=complib,
            fletcher32=fletcher32 or self._fletcher32)
        d['filters'] = filters
    elif self._filters is not None:
        d['filters'] = self._filters

    return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
    """Select coordinates (row numbers) from a table; return the
    coordinates object (an Index of row positions)."""

    # validate the version
    self.validate_version(where)

    # infer the data kind; bail if there is no stored table
    if not self.infer_axes():
        return False

    # create the selection
    self.selection = Selection(
        self, where=where, start=start, stop=stop, **kwargs)
    coords = self.selection.select_coords()
    if self.selection.filter is not None:
        for field, op, filt in self.selection.filter.format():
            # only read the column span that the coordinates cover
            data = self.read_column(field, start=coords.min(), stop=coords.max()+1)
            coords = coords[op(data.iloc[coords-coords.min()], filt).values]

    return Index(coords)
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
    """Return a single column from the table as a Series; generally
    only indexables/data columns are extractable."""

    # validate the version
    self.validate_version()

    # infer the data kind; bail if there is no stored table
    if not self.infer_axes():
        return False

    if where is not None:
        raise TypeError("read_column does not currently accept a where "
                        "clause")

    # find the axes
    for a in self.axes:
        if column == a.name:

            if not a.is_data_indexable:
                raise ValueError(
                    "column [%s] can not be extracted individually; it is "
                    "not data indexable" % column)

            # column must be an indexable or a data column
            c = getattr(self.table.cols, column)
            a.set_info(self.info)
            return Series(_set_tz(a.convert(c[start:stop],
                                            nan_rep=self.nan_rep,
                                            encoding=self.encoding
                                            ).take_data(),
                                  a.tz, True), name=column)

    raise KeyError("column [%s] not found in the table" % column)
class WORMTable(Table):
    """A write-once read-many table: this format DOES NOT ALLOW appending
    to a table. Writing is a one-time operation; the data are stored in
    a format that allows for searching the data on disk.

    Abstract: subclasses must implement ``read`` and ``write``.
    """
    table_type = u('worm')

    def read(self, **kwargs):
        """ read the indicies and the indexing array, calculate offset rows and
        return """
        raise NotImplementedError("WORMTable needs to implement read")

    def write(self, **kwargs):
        """ write in a format that we can search later on (but cannot append
        to): write out the indicies and the values using _write_array
        (e.g. a CArray) create an indexing table so that we can search
        """
        # BUG FIX: the message previously said "WORKTable", which is not
        # the name of this class
        raise NotImplementedError("WORMTable needs to implement write")
class LegacyTable(Table):
    """An appendable table: allow append/query/delete operations to a
    (possibly) already existing appendable table. This table ALLOWS
    append (but doesn't require them), and stores the data in a format
    that can be easily searched.
    """
    # fixed legacy layout: two index columns plus one values column
    _indexables = [
        IndexCol(name='index', axis=1, pos=0),
        IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
        DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
    ]
    table_type = u('legacy')
    ndim = 3

    def write(self, **kwargs):
        raise TypeError("write operations are not allowed on legacy tables!")

    def read(self, where=None, columns=None, **kwargs):
        """Read a legacy table: we have n indexable columns, with an
        arbitrary number of data axes; reconstructs a wide object."""

        if not self.read_axes(where=where, **kwargs):
            return None

        # factorize each index axis into codes/categories
        factors = [Categorical.from_array(a.values, ordered=True) for a in self.index_axes]
        levels = [f.categories for f in factors]
        N = [len(f.categories) for f in factors]
        labels = [f.codes for f in factors]

        # compute the flattened key for each stored row
        key = _factor_indexer(N[1:], labels)
        objs = []
        if len(unique(key)) == len(key):
            # no duplicate coordinates: sort and reshape directly

            sorter, _ = algos.groupsort_indexer(
                com._ensure_int64(key), np.prod(N))
            sorter = com._ensure_platform_int(sorter)

            # create the objs
            for c in self.values_axes:

                # the data need to be sorted
                sorted_values = c.take_data().take(sorter, axis=0)
                if sorted_values.ndim == 1:
                    sorted_values = sorted_values.reshape((sorted_values.shape[0],1))

                take_labels = [l.take(sorter) for l in labels]
                items = Index(c.values)
                block = _block2d_to_blocknd(
                    values=sorted_values, placement=np.arange(len(items)),
                    shape=tuple(N), labels=take_labels, ref_items=items)

                # create the object
                mgr = BlockManager([block], [items] + levels)
                obj = self.obj_type(mgr)

                # permute if needed
                if self.is_transposed:
                    obj = obj.transpose(
                        *tuple(Series(self.data_orientation).argsort()))

                objs.append(obj)

        else:
            # duplicate coordinates: fall back to a long-format
            # reconstruction, keeping only the last occurrence
            warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)

            # reconstruct
            long_index = MultiIndex.from_arrays(
                [i.values for i in self.index_axes])

            for c in self.values_axes:
                lp = DataFrame(c.data, index=long_index, columns=c.values)

                # need a better algorithm
                tuple_index = long_index._tuple_index

                unique_tuples = lib.fast_unique(tuple_index.values)
                unique_tuples = _asarray_tuplesafe(unique_tuples)

                indexer = match(unique_tuples, tuple_index)
                indexer = com._ensure_platform_int(indexer)

                new_index = long_index.take(indexer)
                new_values = lp.values.take(indexer, axis=0)

                lp = DataFrame(new_values, index=new_index, columns=lp.columns)
                objs.append(lp.to_panel())

        # create the composite object
        if len(objs) == 1:
            wp = objs[0]
        else:
            wp = concat(objs, axis=0, verify_integrity=False).consolidate()

        # apply the selection filters & axis orderings
        wp = self.process_axes(wp, columns=columns)

        return wp
class LegacyFrameTable(LegacyTable):
    """Support the legacy frame table."""
    pandas_kind = u('frame_table')
    table_type = u('legacy_frame')
    obj_type = Panel

    def read(self, *args, **kwargs):
        # legacy frames were stored wide; extract the 'value' item
        return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
    """Support the legacy panel table."""
    table_type = u('legacy_panel')
    obj_type = Panel
class AppendableTable(LegacyTable):
    """Support the new appendable table formats."""
    _indexables = None
    table_type = u('appendable')

    def write(self, obj, axes=None, append=False, complib=None,
              complevel=None, fletcher32=None, min_itemsize=None,
              chunksize=None, expectedrows=None, dropna=False, **kwargs):
        """Write (or append) ``obj`` to the table node."""

        # a plain write replaces any existing table node
        if not append and self.is_exists:
            self._handle.remove_node(self.group, 'table')

        # create the axes
        self.create_axes(axes=axes, obj=obj, validate=append,
                         min_itemsize=min_itemsize,
                         **kwargs)

        for a in self.axes:
            a.validate(self, append)

        if not self.is_exists:

            # create the table
            options = self.create_description(complib=complib,
                                              complevel=complevel,
                                              fletcher32=fletcher32,
                                              expectedrows=expectedrows)

            # set the table attributes
            self.set_attrs()

            # create the table
            table = self._handle.create_table(self.group, **options)

        else:
            table = self.table

        # update my info
        self.set_info()

        # validate the axes and set the kinds
        for a in self.axes:
            a.validate_and_set(self, append)

        # add the rows
        self.write_data(chunksize, dropna=dropna)

    def write_data(self, chunksize, dropna=False):
        """Form the data into a 2-d structure including indexes, values
        and an optional all-nan mask; write chunk-by-chunk."""

        names = self.dtype.names
        nrows = self.nrows_expected

        # if dropna==True, then drop ALL nan rows
        if dropna:

            masks = []
            for a in self.values_axes:

                # figure the mask: only do if we can successfully process this
                # column, otherwise ignore the mask
                mask = com.isnull(a.data).all(axis=0)
                masks.append(mask.astype('u1', copy=False))

            # consolidate masks: a row is dropped only if nan in ALL axes
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            mask = mask.ravel()

        else:
            mask = None

        # broadcast the indexes if needed
        indexes = [a.cvalues for a in self.index_axes]
        nindexes = len(indexes)
        bindexes = []
        for i, idx in enumerate(indexes):

            # broadcast to all other indexes except myself
            if i > 0 and i < nindexes:
                repeater = np.prod(
                    [indexes[bi].shape[0] for bi in range(0, i)])
                idx = np.tile(idx, repeater)

            if i < nindexes - 1:
                repeater = np.prod([indexes[bi].shape[0]
                                    for bi in range(i + 1, nindexes)])
                idx = np.repeat(idx, repeater)

            bindexes.append(idx)

        # transpose the values so first dimension is last
        # reshape the values if needed
        values = [a.take_data() for a in self.values_axes]
        values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
                  for v in values]
        bvalues = []
        for i, v in enumerate(values):
            new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
            bvalues.append(values[i].reshape(new_shape))

        # write the chunks
        if chunksize is None:
            chunksize = 100000

        # the row buffer is reused across chunks
        rows = np.empty(min(chunksize,nrows), dtype=self.dtype)
        chunks = int(nrows / chunksize) + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
            if start_i >= end_i:
                break

            self.write_data_chunk(
                rows,
                indexes=[a[start_i:end_i] for a in bindexes],
                mask=mask[start_i:end_i] if mask is not None else None,
                values=[v[start_i:end_i] for v in bvalues])

    def write_data_chunk(self, rows, indexes, mask, values):
        """
        Write one chunk of rows to the table.

        Parameters
        ----------
        rows : an empty memory space where we are putting the chunk
        indexes : an array of the indexes
        mask : an array of the masks
        values : an array of the values
        """

        # 0 len: nothing to write
        for v in values:
            if not np.prod(v.shape):
                return

        try:
            nrows = indexes[0].shape[0]
            if nrows != len(rows):
                rows = np.empty(nrows, dtype=self.dtype)
            names = self.dtype.names
            nindexes = len(indexes)

            # indexes occupy the leading structured fields
            for i, idx in enumerate(indexes):
                rows[names[i]] = idx

            # values follow the index fields
            for i, v in enumerate(values):
                rows[names[i + nindexes]] = v

            # mask: drop the all-nan rows before appending
            if mask is not None:
                m = ~mask.ravel().astype(bool, copy=False)
                if not m.all():
                    rows = rows[m]

        except Exception as detail:
            raise Exception("cannot create row-data -> %s" % detail)

        try:
            if len(rows):
                self.table.append(rows)
                self.table.flush()
        except Exception as detail:
            raise TypeError("tables cannot write this data -> %s" % detail)

    def delete(self, where=None, start=None, stop=None, **kwargs):
        """Delete rows matching ``where`` (or a start/stop span, or all
        rows); return the number of rows removed."""

        # delete all rows (and return the nrows)
        if where is None or not len(where):
            if start is None and stop is None:
                nrows = self.nrows
                self._handle.remove_node(self.group, recursive=True)
            else:
                # pytables<3.0 would remove a single row with stop=None
                if stop is None:
                    stop = self.nrows
                nrows = self.table.remove_rows(start=start, stop=stop)
                self.table.flush()
            return nrows

        # infer the data kind; bail if there is no stored table
        if not self.infer_axes():
            return None

        # create the selection
        table = self.table
        self.selection = Selection(self, where, start=start, stop=stop, **kwargs)
        values = self.selection.select_coords()

        # delete the rows in reverse order
        l = Series(values).sort_values()
        ln = len(l)

        if ln:

            # construct groups of consecutive rows
            diff = l.diff()
            groups = list(diff[diff > 1].index)

            # 1 group
            if not len(groups):
                groups = [0]

            # final element
            if groups[-1] != ln:
                groups.append(ln)

            # initial element
            if groups[0] != 0:
                groups.insert(0, 0)

            # we must remove in reverse order! (so earlier row numbers
            # stay valid while later spans are removed)
            pg = groups.pop()
            for g in reversed(groups):
                rows = l.take(lrange(g, pg))
                table.remove_rows(start=rows[rows.index[0]
                                             ], stop=rows[rows.index[-1]] + 1)
                pg = g

            self.table.flush()

        # return the number of rows removed
        return ln
class AppendableFrameTable(AppendableTable):
    """Support the new appendable table formats (DataFrame)."""
    pandas_kind = u('frame_table')
    table_type = u('appendable_frame')
    ndim = 2
    obj_type = DataFrame

    @property
    def is_transposed(self):
        # frames are stored transposed when indexed on axis 1
        return self.index_axes[0].axis == 1

    def get_object(self, obj):
        """These are written transposed."""
        if self.is_transposed:
            obj = obj.T
        return obj

    def read(self, where=None, columns=None, **kwargs):
        """Read the table back into a DataFrame, applying selection
        filters and column restriction."""

        if not self.read_axes(where=where, **kwargs):
            return None

        info = (self.info.get(self.non_index_axes[0][0], dict())
                if len(self.non_index_axes) else dict())
        index = self.index_axes[0].values
        frames = []
        for a in self.values_axes:

            # we could have a multi-index constructor here
            # _ensure_index doesn't recognized our list-of-tuples here
            if info.get('type') == 'MultiIndex':
                cols = MultiIndex.from_tuples(a.values)
            else:
                cols = Index(a.values)
            names = info.get('names')
            if names is not None:
                cols.set_names(names, inplace=True)

            if self.is_transposed:
                values = a.cvalues
                index_ = cols
                cols_ = Index(index, name=getattr(index, 'name', None))
            else:
                values = a.cvalues.T
                index_ = Index(index, name=getattr(index, 'name', None))
                cols_ = cols

            # if we have a DataIndexableCol, its shape will only be 1 dim
            if values.ndim == 1 and isinstance(values, np.ndarray):
                values = values.reshape((1, values.shape[0]))

            block = make_block(values, placement=np.arange(len(cols_)))
            mgr = BlockManager([block], [cols_, index_])
            frames.append(DataFrame(mgr))

        if len(frames) == 1:
            df = frames[0]
        else:
            df = concat(frames, axis=1, verify_integrity=False).consolidate()

        # apply the selection filters & axis orderings
        df = self.process_axes(df, columns=columns)

        return df
class AppendableSeriesTable(AppendableFrameTable):
    """Support the new appendable table formats (Series stored as a
    single-column frame)."""
    pandas_kind = u('series_table')
    table_type = u('appendable_series')
    ndim = 2
    obj_type = Series
    storage_obj_type = DataFrame

    @property
    def is_transposed(self):
        return False

    def get_object(self, obj):
        return obj

    def write(self, obj, data_columns=None, **kwargs):
        """We are going to write this as a frame table."""
        if not isinstance(obj, DataFrame):
            # a Series without a name gets a default column name
            name = obj.name or 'values'
            obj = DataFrame({name: obj}, index=obj.index)
            obj.columns = [name]
        return super(AppendableSeriesTable, self).write(
            obj=obj, data_columns=obj.columns, **kwargs)

    def read(self, columns=None, **kwargs):
        """Read back as a Series, re-applying any multi-index levels."""

        is_multi_index = self.is_multi_index
        if columns is not None and is_multi_index:
            # the level columns are needed to rebuild the index
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
        if is_multi_index:
            s.set_index(self.levels, inplace=True)

        s = s.iloc[:, 0]

        # remove the default name
        if s.name == 'values':
            s.name = None
        return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
    """Support the new appendable table formats (Series with a
    MultiIndex, stored flattened with its levels as columns)."""
    pandas_kind = u('series_table')
    table_type = u('appendable_multiseries')

    def write(self, obj, **kwargs):
        """We are going to write this as a frame table."""
        name = obj.name or 'values'
        # flatten the multi-index into columns; remember the level names
        obj, self.levels = self.validate_multiindex(obj)
        cols = list(self.levels)
        cols.append(name)
        obj.columns = cols
        return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
    """A table that reads (never writes) the generic pytables table
    format, i.e. tables not created by pandas."""
    pandas_kind = u('frame_table')
    table_type = u('generic_table')
    ndim = 2
    obj_type = DataFrame

    @property
    def pandas_type(self):
        return self.pandas_kind

    @property
    def storable(self):
        # a generic node may be the table itself rather than a group
        return getattr(self.group, 'table', None) or self.group

    def get_attrs(self):
        """Retrieve our attributes; generic tables carry no pandas
        attributes, so everything is inferred from the description."""
        self.non_index_axes = []
        self.nan_rep = None
        self.levels = []

        self.index_axes = [a.infer(self)
                           for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a.infer(self)
                            for a in self.indexables if not a.is_an_indexable]
        self.data_columns = [a.name for a in self.values_axes]

    @property
    def indexables(self):
        """Create the indexables from the table description."""
        if self._indexables is None:

            d = self.description

            # the index columns is just a simple index
            self._indexables = [GenericIndexCol(name='index', axis=0)]

            # every described column becomes a (data-indexable) column
            for i, n in enumerate(d._v_names):

                dc = GenericDataIndexableCol(
                    name=n, pos=i, values=[n], version=self.version)
                self._indexables.append(dc)

        return self._indexables

    def write(self, **kwargs):
        raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
    """A frame with a multi-index, stored flattened with its index
    levels as data columns."""
    table_type = u('appendable_multiframe')
    obj_type = DataFrame
    ndim = 2
    # matches the synthetic names given to unnamed levels on write
    _re_levels = re.compile("^level_\d+$")

    @property
    def table_type_short(self):
        return u('appendable_multi')

    def write(self, obj, data_columns=None, **kwargs):
        if data_columns is None:
            data_columns = []
        elif data_columns is True:
            data_columns = obj.columns[:]
        # flatten the index; its levels must be data columns so the
        # index can be rebuilt (and queried) on read
        obj, self.levels = self.validate_multiindex(obj)
        for n in self.levels:
            if n not in data_columns:
                data_columns.insert(0, n)
        return super(AppendableMultiFrameTable, self).write(
            obj=obj, data_columns=data_columns, **kwargs)

    def read(self, **kwargs):

        df = super(AppendableMultiFrameTable, self).read(**kwargs)
        df = df.set_index(self.levels)

        # remove names for 'level_%d' (they were synthetic)
        df.index = df.index.set_names([
            None if self._re_levels.search(l) else l for l in df.index.names
        ])

        return df
class AppendablePanelTable(AppendableTable):
    """Support the new appendable table formats (Panel)."""
    table_type = u('appendable_panel')
    ndim = 3
    obj_type = Panel

    def get_object(self, obj):
        """These are written transposed."""
        if self.is_transposed:
            obj = obj.transpose(*self.data_orientation)
        return obj

    @property
    def is_transposed(self):
        # transposed whenever the stored orientation differs from the
        # natural (0, 1, ..., ndim-1) ordering
        return self.data_orientation != tuple(range(self.ndim))
class AppendableNDimTable(AppendablePanelTable):
    """Support the new appendable table formats (4-dim Panel4D)."""
    table_type = u('appendable_ndim')
    ndim = 4
    obj_type = Panel4D
def _reindex_axis(obj, axis, labels, other=None):
    """Reindex ``obj`` along ``axis`` to ``labels`` (optionally intersected
    with ``other``), skipping the reindex when the target already equals
    the current axis.

    Parameters
    ----------
    obj : pandas object
    axis : int
    labels : sequence, coerced to an Index
    other : sequence, optional
        When given, the result axis is the intersection of the unique
        values of ``labels`` and ``other``.
    """
    ax = obj._get_axis(axis)
    labels = _ensure_index(labels)

    # try not to reindex even if other is provided
    # if it equals our current index
    if other is not None:
        other = _ensure_index(other)
    if (other is None or labels.equals(other)) and labels.equals(ax):
        return obj

    labels = _ensure_index(labels.unique())
    if other is not None:
        labels = labels & _ensure_index(other.unique())
    if not labels.equals(ax):
        # slice only the requested axis, leaving all others untouched
        slicer = [slice(None, None)] * obj.ndim
        slicer[axis] = labels
        obj = obj.loc[tuple(slicer)]
    return obj
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except:
idx = info[name] = dict()
return idx
### tz to/from coercion ###
def _get_tz(tz):
    """ for a tz-aware type, return an encoded zone

    Named zones encode as the zone value returned by
    ``tslib.get_timezone``; when that returns None (presumably a fixed
    offset -- TODO confirm), fall back to the total UTC offset in seconds.
    """
    zone = tslib.get_timezone(tz)
    if zone is None:
        zone = tslib.tot_seconds(tz.utcoffset())
    return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
    """
    coerce the values to a DatetimeIndex if tz is set
    preserve the input shape if possible

    Parameters
    ----------
    values : ndarray
    tz : string/pickled tz object
    preserve_UTC : boolean,
        preserve the UTC of the result
    coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
    """
    if tz is not None:
        # NOTE(review): ravel flattens the input, which seems to contradict
        # "preserve the input shape if possible" above -- confirm intent
        values = values.ravel()
        tz = tslib.get_timezone(_ensure_decoded(tz))
        values = DatetimeIndex(values)
        if values.tz is None:
            # stored values are treated as UTC, then converted to target tz
            values = values.tz_localize('UTC').tz_convert(tz)
        if preserve_UTC:
            if tz == 'UTC':
                values = list(values)
    elif coerce:
        # no timezone at all: plain datetime64[ns] ndarray
        values = np.asarray(values, dtype='M8[ns]')
    return values
def _convert_index(index, encoding=None, format_type=None):
    """Convert an index into its storage representation (an ``IndexCol``).

    Dispatches first on the concrete index class, then on the inferred
    dtype of the values. The returned ``IndexCol`` carries the converted
    values, a kind tag (read back by ``_unconvert_index``) and the
    matching PyTables atom.
    """
    index_name = getattr(index, 'name', None)

    # class-based dispatch first: these carry extra metadata (freq, tz)
    if isinstance(index, DatetimeIndex):
        converted = index.asi8
        return IndexCol(converted, 'datetime64', _tables().Int64Col(),
                        freq=getattr(index, 'freq', None),
                        tz=getattr(index, 'tz', None),
                        index_name=index_name)
    elif isinstance(index, TimedeltaIndex):
        converted = index.asi8
        return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
                        freq=getattr(index, 'freq', None),
                        index_name=index_name)
    elif isinstance(index, (Int64Index, PeriodIndex)):
        atom = _tables().Int64Col()
        return IndexCol(
            index.values, 'integer', atom, freq=getattr(index, 'freq', None),
            index_name=index_name)

    if isinstance(index, MultiIndex):
        raise TypeError('MultiIndex not supported here!')

    # fall back to value-based dispatch
    inferred_type = lib.infer_dtype(index)

    values = np.asarray(index)

    if inferred_type == 'datetime64':
        converted = values.view('i8')
        return IndexCol(converted, 'datetime64', _tables().Int64Col(),
                        freq=getattr(index, 'freq', None),
                        tz=getattr(index, 'tz', None),
                        index_name=index_name)
    elif inferred_type == 'timedelta64':
        converted = values.view('i8')
        return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
                        freq=getattr(index, 'freq', None),
                        index_name=index_name)
    elif inferred_type == 'datetime':
        # python datetimes -> float seconds since the epoch
        converted = np.asarray([(time.mktime(v.timetuple()) +
                                 v.microsecond / 1E6) for v in values],
                               dtype=np.float64)
        return IndexCol(converted, 'datetime', _tables().Time64Col(),
                        index_name=index_name)
    elif inferred_type == 'date':
        # python dates -> proleptic Gregorian ordinals
        converted = np.asarray([v.toordinal() for v in values],
                               dtype=np.int32)
        return IndexCol(converted, 'date', _tables().Time32Col(),
                        index_name=index_name)
    elif inferred_type == 'string':
        # atom = _tables().ObjectAtom()
        # return np.asarray(values, dtype='O'), 'object', atom
        converted = _convert_string_array(values, encoding)
        itemsize = converted.dtype.itemsize
        return IndexCol(
            converted, 'string', _tables().StringCol(itemsize),
            itemsize=itemsize, index_name=index_name
        )
    elif inferred_type == 'unicode':
        if format_type == 'fixed':
            atom = _tables().ObjectAtom()
            return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
                            index_name=index_name)
        # NOTE(review): message reads "as a in index type" -- typo in the
        # user-facing string; left untouched here
        raise TypeError(
            "[unicode] is not supported as a in index type for [{0}] formats"
            .format(format_type)
        )
    elif inferred_type == 'integer':
        # take a guess for now, hope the values fit
        atom = _tables().Int64Col()
        return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
                        index_name=index_name)
    elif inferred_type == 'floating':
        atom = _tables().Float64Col()
        return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
                        index_name=index_name)
    else:  # pragma: no cover
        # catch-all: store as python objects
        atom = _tables().ObjectAtom()
        return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
                        index_name=index_name)
def _unconvert_index(data, kind, encoding=None):
    """Inverse of ``_convert_index``: rebuild index values from the stored
    array and the recorded ``kind`` tag.

    Parameters
    ----------
    data : ndarray
        Stored values.
    kind : str
        Kind tag written by ``_convert_index``.
    encoding : str, optional
        Used to decode stored strings.

    Returns
    -------
    Index or ndarray

    Raises
    ------
    ValueError
        If ``kind`` is not recognized.
    """
    kind = _ensure_decoded(kind)
    if kind == u('datetime64'):
        index = DatetimeIndex(data)
    elif kind == u('timedelta64'):
        index = TimedeltaIndex(data)
    elif kind == u('datetime'):
        index = np.asarray([datetime.fromtimestamp(v) for v in data],
                           dtype=object)
    elif kind == u('date'):
        try:
            index = np.asarray(
                [date.fromordinal(v) for v in data], dtype=object)
        except ValueError:
            # legacy stores saved dates as timestamps, not ordinals
            index = np.asarray(
                [date.fromtimestamp(v) for v in data], dtype=object)
    elif kind in (u('integer'), u('float')):
        index = np.asarray(data)
    elif kind == u('string'):
        # fix: was ``kind in (u('string'))`` -- without a trailing comma
        # the parentheses are not a tuple, so this was a *substring* test
        # against 'string' (matching e.g. 'tri' or 'in' as well)
        index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
    elif kind == u('object'):
        index = np.asarray(data[0])
    else:  # pragma: no cover
        raise ValueError('unrecognized index type %s' % kind)
    return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
    """Legacy counterpart of ``_unconvert_index`` for old-format stores.

    Parameters
    ----------
    data : ndarray
        Stored values.
    kind : str
        Kind tag of the stored index.
    legacy : bool, default False
        Unused here; kept for interface compatibility.
    encoding : str, optional
        Used to decode stored strings.

    Raises
    ------
    ValueError
        If ``kind`` is not recognized.
    """
    kind = _ensure_decoded(kind)
    if kind == u('datetime'):
        index = lib.time64_to_datetime(data)
    elif kind == u('integer'):
        # fix: was ``kind in (u('integer'))`` -- a substring test, since
        # the parentheses without a comma are not a tuple
        index = np.asarray(data, dtype=object)
    elif kind == u('string'):
        # fix: same missing-comma substring bug as above
        index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
    else:  # pragma: no cover
        raise ValueError('unrecognized index type %s' % kind)
    return index
def _convert_string_array(data, encoding, itemsize=None):
    """
    we take a string-like that is object dtype and coerce to a fixed size string type

    Parameters
    ----------
    data : a numpy array of object dtype
    encoding : None or string-encoding
    itemsize : integer, optional, defaults to the max length of the strings

    Returns
    -------
    data in a fixed-length string dtype, encoded to bytes if needed
    """

    # encode if needed
    if encoding is not None and len(data):
        # round-trip through a Series to use the vectorized str.encode,
        # then restore the original shape
        data = Series(data.ravel()).str.encode(encoding).values.reshape(data.shape)

    # create the sized dtype
    if itemsize is None:
        itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))

    data = np.asarray(data, dtype="S%d" % itemsize)
    return data
def _unconvert_string_array(data, nan_rep=None, encoding=None):
    """
    inverse of _convert_string_array

    Parameters
    ----------
    data : fixed length string dtyped array
    nan_rep : the storage repr of NaN, optional
    encoding : the encoding of the data, optional

    Returns
    -------
    an object array of the decoded data
    """
    shape = data.shape
    data = np.asarray(data.ravel(), dtype=object)

    # guard against a None encoding in PY3 (because of a legacy
    # where the passed encoding is actually None)
    encoding = _ensure_encoding(encoding)
    if encoding is not None and len(data):

        itemsize = lib.max_len_string_array(com._ensure_object(data))
        # target a unicode dtype on PY3, bytes dtype on PY2
        if compat.PY3:
            dtype = "U{0}".format(itemsize)
        else:
            dtype = "S{0}".format(itemsize)

        if isinstance(data[0], compat.binary_type):
            # stored as bytes: decode element-wise via Series.str
            data = Series(data).str.decode(encoding).values
        else:
            data = data.astype(dtype, copy=False).astype(object, copy=False)

    # replace the on-disk NaN sentinel with actual missing values
    if nan_rep is None:
        nan_rep = 'nan'

    data = lib.string_array_replace_from_nan_rep(data, nan_rep)
    return data.reshape(shape)
def _maybe_convert(values, val_kind, encoding):
    """Run the kind-specific converter over ``values`` when one applies;
    otherwise return ``values`` unchanged."""
    if not _need_convert(val_kind):
        return values
    converter = _get_converter(val_kind, encoding)
    return converter(values)
def _get_converter(kind, encoding):
    """Return a callable that converts stored values of ``kind`` back to
    their in-memory representation.

    Raises ``ValueError`` for an unrecognized kind.
    """
    kind = _ensure_decoded(kind)
    if kind == 'datetime64':
        return lambda x: np.asarray(x, dtype='M8[ns]')
    if kind == 'datetime':
        return lib.convert_timestamps
    if kind == 'string':
        return lambda x: _unconvert_string_array(x, encoding=encoding)
    raise ValueError('invalid kind %s' % kind)  # pragma: no cover
def _need_convert(kind):
    """True when values of ``kind`` must be converted after reading."""
    return _ensure_decoded(kind) in (u('datetime'), u('datetime64'),
                                     u('string'))
class Selection(object):
    """
    Carries out a selection operation on a tables.Table object.

    Parameters
    ----------
    table : a Table object
    where : list of Terms (or convertible to)
    start, stop : indices to start and/or stop selection
    """

    def __init__(self, table, where=None, start=None, stop=None, **kwargs):
        self.table = table
        self.where = where
        self.start = start
        self.stop = stop
        # populated below: either a parsed expression (condition/filter)
        # or explicit row coordinates, never both
        self.condition = None
        self.filter = None
        self.terms = None
        self.coordinates = None

        if com.is_list_like(where):

            # see if we have a passed coordinate like
            try:
                inferred = lib.infer_dtype(where)
                if inferred == 'integer' or inferred == 'boolean':
                    where = np.asarray(where)
                    if where.dtype == np.bool_:
                        # boolean mask: positions taken relative to the
                        # requested [start, stop) window
                        start, stop = self.start, self.stop
                        if start is None:
                            start = 0
                        if stop is None:
                            stop = self.table.nrows
                        self.coordinates = np.arange(start, stop)[where]
                    elif issubclass(where.dtype.type, np.integer):
                        # explicit row coordinates: must lie inside the
                        # requested window
                        if ((self.start is not None and
                                (where < self.start).any()) or
                            (self.stop is not None and
                                (where >= self.stop).any())):
                            raise ValueError(
                                "where must have index locations >= start and "
                                "< stop"
                            )
                        self.coordinates = where
            except:
                # NOTE(review): bare except -- it also swallows the
                # ValueError raised just above, so out-of-range coordinates
                # silently fall through to expression parsing below; confirm
                # whether that is intended
                pass

        if self.coordinates is None:

            self.terms = self.generate(where)

            # create the numexpr & the filter
            if self.terms is not None:
                self.condition, self.filter = self.terms.evaluate()

    def generate(self, where):
        """ where can be a : dict,list,tuple,string """
        if where is None:
            return None

        q = self.table.queryables()
        try:
            return Expr(where, queryables=q, encoding=self.table.encoding)
        except NameError as detail:
            # raise a nice message, suggesting that the user should use
            # data_columns
            # NOTE(review): "refrences" typo lives in the user-facing
            # message; left untouched here
            raise ValueError(
                "The passed where expression: {0}\n"
                "            contains an invalid variable reference\n"
                "            all of the variable refrences must be a "
                "reference to\n"
                "            an axis (e.g. 'index' or 'columns'), or a "
                "data_column\n"
                "            The currently defined references are: {1}\n"
                .format(where, ','.join(q.keys()))
            )

    def select(self):
        """
        generate the selection

        Reads via the numexpr condition when one was parsed, via explicit
        coordinates when provided, or the whole [start, stop) range
        otherwise.
        """
        if self.condition is not None:
            return self.table.table.read_where(self.condition.format(),
                                               start=self.start, stop=self.stop)
        elif self.coordinates is not None:
            return self.table.table.read_coordinates(self.coordinates)
        return self.table.table.read(start=self.start, stop=self.stop)

    def select_coords(self):
        """
        generate the selection

        Returns the row coordinates matching the selection rather than
        the data itself. Negative start/stop are interpreted relative to
        the end of the table.
        """
        start, stop = self.start, self.stop
        nrows = self.table.nrows
        if start is None:
            start = 0
        elif start < 0:
            start += nrows
        if self.stop is None:
            stop = nrows
        elif stop < 0:
            stop += nrows

        if self.condition is not None:
            return self.table.table.get_where_list(self.condition.format(),
                                                   start=start, stop=stop,
                                                   sort=True)
        elif self.coordinates is not None:
            return self.coordinates

        return np.arange(start, stop)
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
    """Benchmark helper: append ``df`` under ``key`` to a throwaway HDF
    store (intended for use with %timeit-style timing).

    Parameters
    ----------
    key : str
    df : DataFrame
    fn : str, optional
        Target file name; defaults to 'timeit.h5'.
    remove : bool, default True
        Delete the file afterwards.
    **kwargs
        Passed through to ``HDFStore.append``.
    """
    if fn is None:
        fn = 'timeit.h5'
    store = HDFStore(fn, mode='w')
    store.append(key, df, **kwargs)
    store.close()

    if remove:
        os.remove(fn)
| mit |
leonardbinet/Transilien-Api-ETL | api_etl/builder_feature_matrix.py | 2 | 35646 | """Module containing class to build feature matrices for prediction.
There are two kinds of features:
- either features for direct prediction model
- either features for recursive prediction model
Only the first one is used for now.
"""
from os import path, makedirs
import logging
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from api_etl.utils_misc import (
get_paris_local_datetime_now, DateConverter, S3Bucket
)
from api_etl.querier_schedule import DBQuerier
from api_etl.querier_realtime import ResultsSet
from api_etl.settings import __S3_BUCKETS__, __TRAINING_SET_FOLDER_PATH__, __RAW_DAYS_FOLDER_PATH__, __DATA_PATH__
logger = logging.getLogger(__name__)
pd.options.mode.chained_assignment = None
class DayMatrixBuilder:
    """Build features and label matrices from data available from schedule
    and from realtime info.

    1st step (init): get all information from day (schedule+realtime):
    needs day parameter (else set to today).

    2nd step: build matrices using only data available at given time: needs
    time parameter (else set to time now).

    Still "beta" functionality: provide df directly.
    """

    def __init__(self, day=None, df=None):
        """ Given a day, will query schedule and realtime information to
        provide a dataframe containing all stops.

        Parameters
        ----------
        day : str, optional
            Day in "%Y%m%d" format; defaults to today (Paris time).
        df : pd.DataFrame, optional
            Pre-built dataframe; when given, no queries are performed.
        """
        # Arguments validation and parsing
        if day:
            # will raise error if wrong format
            datetime.strptime(day, "%Y%m%d")
            self.day = str(day)
        else:
            dt_today = get_paris_local_datetime_now()
            self.day = dt_today.strftime("%Y%m%d")
        logger.info("Day considered: %s" % self.day)

        if isinstance(df, pd.DataFrame):
            # "beta": caller supplies the raw dataframe directly
            self._initial_df = df
            self._builder_realtime_request_time = None
            logger.info("Dataframe provided for day %s" % self.day)
        else:
            logger.info("Requesting data for day %s" % self.day)
            self.querier = DBQuerier(scheduled_day=self.day)
            # Get schedule
            self.stops_results = self.querier.stoptimes(on_day=self.day, level=4)
            self.serialized_stoptimes = ResultsSet(self.stops_results)
            logger.info("Schedule queried.")
            # Perform realtime queries
            dt_realtime_request = get_paris_local_datetime_now()
            self._builder_realtime_request_time = dt_realtime_request\
                .strftime("%H:%M:%S")
            self.serialized_stoptimes.batch_realtime_query(self.day)
            logger.info("RealTime queried.")
            # Export flat dict as dataframe
            self._initial_df = pd\
                .DataFrame(self.serialized_stoptimes.get_flat_dicts())
            logger.info("Initial dataframe created.")

        # Datetime considered as now
        self.paris_datetime_now = get_paris_local_datetime_now()

        self._clean_initial_df()
        logger.info("Initial dataframe cleaned.")
        self._compute_initial_dates()
        logger.info("Initial dataframe calculations computed.")

    def _clean_initial_df(self):
        """ Set Nan values, and convert necessary columns as float.
        """
        # Replace Unknown by Nan
        self._initial_df.replace("Unknown", np.nan, inplace=True)
        # Convert to numeric (coercing bad values to NaN)
        cols_to_num = ["StopTime_stop_sequence", "RealTime_data_freshness"]
        for col in cols_to_num:
            self._initial_df.loc[:, col] = pd\
                .to_numeric(self._initial_df.loc[:, col], errors="coerce")

    def _compute_initial_dates(self):
        """ Adds following columns:
        - D_business_day: bool
        - D_stop_special_day: scheduled_day str, day in special date (25h)
        - D_total_sequence: int: number of stops scheduled per trip
        - D_stop_scheduled_datetime: datetime of scheduled stoptime
        - D_trip_passed_scheduled_stop: bool
        """
        # Detect if working day
        self._initial_df.loc[:, "D_business_day"] = bool(
            len(pd.bdate_range(self.day, self.day)))

        # Write stoptime_day
        self._initial_df.loc[:, "D_stop_special_day"] = self.day

        # Scheduled stop datetime
        self._initial_df.loc[:, "D_stop_scheduled_datetime"] = self._initial_df\
            .StopTime_departure_time\
            .apply(lambda x: DateConverter(
                special_time=x,
                special_date=self.day,
                force_regular_date=True
            ).dt
            )

        # Has really passed schedule (relative to "now" at build time)
        self._initial_df.loc[:, "D_trip_passed_scheduled_stop"] = self._initial_df.D_stop_scheduled_datetime\
            .apply(lambda x:
                   (self.paris_datetime_now - x).total_seconds() >= 0
                   )

        # Observed stop datetime (only rows with realtime data)
        self._initial_df.loc[:, "D_stop_observed_datetime"] = self\
            ._initial_df[self._initial_df.RealTime_data_freshness.notnull()]\
            .apply(lambda x: DateConverter(
                special_time=x.RealTime_expected_passage_time,
                special_date=x.RealTime_expected_passage_day
            ).dt,
                axis=1
            )

        # Seconds between now and the observed passage (>0 means passed)
        self._initial_df.loc[:, "D_trip_time_to_observed_stop"] = self\
            ._initial_df[self._initial_df.D_stop_observed_datetime.notnull()]\
            .D_stop_observed_datetime\
            .apply(lambda x:
                   (self.paris_datetime_now - x).total_seconds()
                   )

        # Has really passed observed stop
        self._initial_df.loc[:, "D_trip_passed_observed_stop"] = self\
            ._initial_df[self._initial_df.D_stop_observed_datetime.notnull()]\
            .D_trip_time_to_observed_stop\
            .apply(lambda x: (x >= 0))

        # Trip delay (observed - scheduled, in seconds)
        self._initial_df.loc[:, "D_trip_delay"] = self\
            ._initial_df[self._initial_df.RealTime_data_freshness.notnull()]\
            .apply(
                lambda x:
                (x["D_stop_observed_datetime"] -
                 x["D_stop_scheduled_datetime"])
                .total_seconds(),
                axis=1
            )

        # Trips total number of stops
        trips_total_number_stations = self._initial_df\
            .groupby("Trip_trip_id")["Stop_stop_id"].count()
        trips_total_number_stations.name = "D_trip_number_of_stops"
        self._initial_df = self._initial_df\
            .join(trips_total_number_stations, on="Trip_trip_id")

    def stats(self):
        """Print a human-readable summary of schedule vs realtime coverage
        for the day; also stored in ``self.summary``."""
        message = """
        SUMMARY FOR DAY %(day)s: based on information available and requested
        at time %(request_time)s, and trips passage being evaluated given time
        %(date_now)s

        TRIPS
        Number of trips today: %(trips_today)s

        STOPTIMES
        Number of stop times that day: %(stoptimes_today)s
        - Passed:
            - scheduled: %(stoptimes_passed)s
            - observed: %(stoptimes_passed_observed)s
        - Not passed yet:
            - scheduled: %(stoptimes_not_passed)s
            - observed (predictions on boards) %(stoptimes_not_passed_observed)s
        """
        self.summary = {
            "day": self.day,
            "request_time": self._builder_realtime_request_time,
            "date_now": self.paris_datetime_now,
            "trips_today": len(self._initial_df.Trip_trip_id.unique()),
            "stoptimes_today": self._initial_df.Trip_trip_id.count(),
            "stoptimes_passed": self._initial_df
            .D_trip_passed_scheduled_stop.sum(),
            "stoptimes_passed_observed": self._initial_df.
            D_trip_passed_observed_stop.sum(),
            "stoptimes_not_passed": (~self._initial_df.D_trip_passed_scheduled_stop).sum(),
            # NOTE(review): ``== False`` (rather than ``~``) keeps NaN
            # (never-observed) rows out of the count -- confirm intent
            "stoptimes_not_passed_observed":
                (self._initial_df.D_trip_passed_observed_stop == False).sum(),
        }
        print(message % self.summary)

    def missing_data_per(self, per="Route_route_short_name"):
        """Aggregate observed vs scheduled stop counts per group.

        Returns a dataframe with Observed / Scheduled / Ratio columns
        indexed by ``per``.
        """
        # per can be also "Stop_stop_id", "Route_route_short_name"
        md = self._initial_df.copy()

        md.loc[:, "observed"] = md\
            .loc[:, "RealTime_day_train_num"]\
            .notnull().apply(int)

        group = md.groupby(per)["observed"]
        agg_observed = group.sum()
        agg_scheduled = group.count()
        agg_ratio = group.mean()
        agg = pd.concat([agg_observed, agg_scheduled, agg_ratio], axis=1)
        agg.columns = ["Observed", "Scheduled", "Ratio"]
        return agg
class DirectPredictionMatrix(DayMatrixBuilder):
    """Feature-matrix builder for the direct prediction model.

    Extends DayMatrixBuilder with a per-time "network state" computation
    (TS_* columns), labels, api/naive predictions and their scores.
    """

    # CONFIGURATION
    # Number of past seconds considered for station median delay
    _secs = 1200

    # Features columns (model inputs)
    _feature_cols = [
        "Route_route_short_name",
        "TS_last_observed_delay",
        "TS_line_station_median_delay",
        "TS_line_median_delay",
        "Trip_direction_id",
        "TS_sequence_diff",
        "TS_stations_scheduled_trip_time",
        "TS_rolling_trips_on_line",
        "RealTime_miss",
        "D_business_day"
    ]

    # Core identification columns
    _id_cols = [
        "TS_matrix_datetime",
        "Route_route_short_name",
        "RealTime_miss",
        "Trip_trip_id",
        "Stop_stop_id",
        "TS_sequence_diff",
        "TS_stations_scheduled_trip_time",
    ]

    # Label columns
    _label_cols = ["label", "label_ev"]

    # Scoring columns
    _scoring_cols = ["S_naive_pred_mae", "S_naive_pred_mse"]

    # Prediction columns
    _prediction_cols = ["P_api_pred", "P_api_pred_ev", "P_naive_pred"]

    # Other useful columns
    _other_useful_cols = [
        "StopTime_departure_time",
        "StopTime_stop_sequence",
        "Stop_stop_name",
        "RealTime_expected_passage_time",
        "RealTime_data_freshness",
    ]

    # For time debugging:
    _time_debug_cols = [
        "StopTime_departure_time", "RealTime_expected_passage_time",
        'D_stop_special_day', 'D_stop_scheduled_datetime',
        'D_trip_passed_scheduled_stop', 'D_stop_observed_datetime',
        'D_trip_time_to_observed_stop', 'D_trip_passed_observed_stop',
        'D_trip_delay', 'TS_matrix_datetime',
        'TS_trip_passed_scheduled_stop', 'TS_observed_vs_matrix_datetime',
        'TS_trip_passed_observed_stop', 'TS_observed_delay',
        'TS_expected_delay', 'TS_trip_status'
    ]
def __init__(self, day=None, df=None):
    """Build day-level data; the per-time state is computed later by
    ``direct_compute_for_time``."""
    DayMatrixBuilder.__init__(self, day=day, df=df)
    # becomes True once direct_compute_for_time() has been run
    self._state_at_time_computed = False
def direct_compute_for_time(self, time="12:00:00"):
    """Given the data obtained from schedule and realtime, this method will
    compute network state at a given time, and provide prediction and label
    matrices.

    :param time: "%H:%M:%S" string combined with ``self.day`` to define
        the state datetime; raises ValueError on a bad format.
    """
    # Parameters parsing
    full_str_dt = "%s%s" % (self.day, time)
    # will raise error if wrong format
    self.state_at_datetime = datetime\
        .strptime(full_str_dt, "%Y%m%d%H:%M:%S")
    self.time = time

    logger.info(
        "Building Matrix for day %s and time %s" % (
            self.day, self.time)
    )
    # Recreate dataframe from initial one (deletes changes)
    self.df = self._initial_df.copy()

    # Computing, in dependency order
    self._compute_trip_state()
    logger.info("TripPredictor computed.")
    self._trip_level()
    logger.info("Trip level computations performed.")
    self._line_level()
    logger.info("Line level computations performed.")
    # Will add labels if information is available
    self._compute_labels()
    logger.info("Labels assigned.")
    self._compute_api_pred()
    logger.info("Api and naive predictions assigned.")
    self._compute_pred_scores()
    logger.info("Naive predictions scored.")
def _compute_trip_state(self):
    """Computes:
    - TS_matrix_datetime: datetime
        = datetime for which state is computed
    - TS_trip_passed_scheduled_stop: Bool
        = at matrix datetime, has train passed scheduled stop?
    - TS_observed_vs_matrix_datetime: int (seconds)
    - TS_trip_passed_observed_stop: Bool
        = at matrix datetime, has train passed observed stop?
    - TS_observed_delay: int (seconds)
    - TS_expected_delay: int (seconds)
    """
    self.df.loc[:, "TS_matrix_datetime"] = self.state_at_datetime\
        .strftime("%Y%m%d-%H:%M:%S")

    # Has passed scheduled stop at state datetime
    self.df.loc[:, "TS_trip_passed_scheduled_stop"] = self.df\
        .D_stop_scheduled_datetime\
        .apply(lambda x:
               ((self.state_at_datetime - x).total_seconds() >= 0),
               )

    # Time between matrix datetime (for which we compute the prediction
    # features matrix), and stop times observed passages (only for observed
    # passages). <0 means passed, >0 means not passed yet at the given time
    self.df.loc[:, "TS_observed_vs_matrix_datetime"] = self\
        .df[self.df["D_stop_observed_datetime"].notnull()]\
        .D_stop_observed_datetime\
        .apply(lambda x: (self.state_at_datetime - x).total_seconds())

    # Has passed observed stop time at state datetime
    self.df.loc[:, "TS_trip_passed_observed_stop"] = self\
        .df[self.df["TS_observed_vs_matrix_datetime"]
            .notnull()]\
        .loc[:, "TS_observed_vs_matrix_datetime"]\
        .apply(lambda x: (x >= 0))

    # TripState_observed_delay: delay is "observed" only for stops the
    # trip has already passed at matrix time
    self.df.loc[:, "TS_observed_delay"] = self\
        .df[self.df["TS_trip_passed_observed_stop"] == True]\
        .D_trip_delay

    # TripState_expected_delay: for not-yet-passed stops with realtime
    # data, the delay is still a (board) prediction
    self.df.loc[:, "TS_expected_delay"] = self\
        .df.query("(TS_trip_passed_observed_stop != True) & (RealTime_data_freshness.notnull())")\
        .D_trip_delay

    self._state_at_time_computed = True
def _trip_level(self):
    """Compute trip level information:
    - TS_trip_status: 0<=x<=1: proportion of passed stations at time
    - D_total_sequence: number of stops scheduled for this trip
    - last_sequence_number: last observed stop sequence for this trip at
    time
    - last_observed_delay
    """
    # Trips total number of stops
    trips_total_number_stations = self.df\
        .groupby("Trip_trip_id")["Stop_stop_id"].count()
    # already added to day matrix

    # Trips status at time: fraction of scheduled stops already passed
    trips_number_passed_stations = self.df\
        .groupby("Trip_trip_id")["TS_trip_passed_scheduled_stop"].sum()
    trips_status = trips_number_passed_stations \
        / trips_total_number_stations
    trips_status.name = "TS_trip_status"
    self.trips_status = trips_status
    self.df = self.df.join(trips_status, on="Trip_trip_id")

    # Trips last observed stop_sequence (rolling trips only)
    last_sequence_number = self\
        .df.query("(TS_trip_status < 1) & (TS_trip_status > 0) & (TS_trip_passed_observed_stop == True)")\
        .groupby("Trip_trip_id")["StopTime_stop_sequence"].max()
    last_sequence_number.name = "TS_last_sequence_number"
    self.df = self.df.join(last_sequence_number, on="Trip_trip_id")

    # Compute number of stops between last observed station and predicted
    # station.
    self.df.loc[:, "TS_sequence_diff"] = self.df.StopTime_stop_sequence - \
        self.df.loc[:, "TS_last_sequence_number"]

    # Trips last observed delay
    last_observed_delay = self.df\
        .query("TS_last_sequence_number==StopTime_stop_sequence")\
        .loc[:, ["Trip_trip_id", "TS_observed_delay"]]
    last_observed_delay.set_index("Trip_trip_id", inplace=True)
    last_observed_delay.columns = ["TS_last_observed_delay"]
    self.df = self.df.join(last_observed_delay, on="Trip_trip_id")

    # Trips last observed scheduled departure time
    # useful to know how much time was scheduled between stations
    last_observed_scheduled_dep_time = self.df\
        .query("TS_last_sequence_number==StopTime_stop_sequence")\
        .loc[:, ["Trip_trip_id", "StopTime_departure_time"]]
    last_observed_scheduled_dep_time\
        .set_index("Trip_trip_id", inplace=True)
    last_observed_scheduled_dep_time.columns = [
        "TS_last_observed_scheduled_dep_time"]
    self.df = self.df\
        .join(last_observed_scheduled_dep_time, on="Trip_trip_id")

    # Compute number of seconds between last observed passed trip scheduled
    # departure time, and departure time of predited station
    self.df.loc[:, "TS_stations_scheduled_trip_time"] = self.df\
        .query("TS_last_observed_scheduled_dep_time.notnull()")\
        .apply(lambda x:
               DateConverter(dt=x["D_stop_scheduled_datetime"])
               .compute_delay_from(
                   special_date=self.day,
                   special_time=x["TS_last_observed_scheduled_dep_time"],
                   force_regular_date=True
               ),
               axis=1
               )
def _line_level(self):
    """ Computes line level information:
    - median delay on line on last n seconds
    - median delay on line station on last n seconds
    - number of currently rolling trips on line

    Requires time to now (_add_time_to_now_col).
    """
    # Compute delays on last n seconds (defined in init self._secs)

    # Line aggregation
    line_median_delay = self.df\
        .query("(TS_observed_vs_matrix_datetime<%s) & (TS_observed_vs_matrix_datetime>=0) " % self._secs)\
        .groupby("Route_route_short_name")\
        .TS_observed_delay.median()
    line_median_delay.name = "TS_line_median_delay"
    self.df = self.df\
        .join(line_median_delay, on="Route_route_short_name")
    self.line_median_delay = line_median_delay

    # Line and station aggregation
    # same station can have different values given on which lines it
    # is located.
    line_station_median_delay = self.df\
        .query("(TS_observed_vs_matrix_datetime < %s) & (TS_observed_vs_matrix_datetime>=0) " % self._secs)\
        .groupby(["Route_route_short_name", "Stop_stop_id"])\
        .TS_observed_delay.median()
    line_station_median_delay.name = "TS_line_station_median_delay"
    self.df = self.df\
        .join(line_station_median_delay, on=["Route_route_short_name", "Stop_stop_id"])
    self.line_station_median_delay = line_station_median_delay

    # Number of currently rolling trips
    rolling_trips_on_line = self\
        .df.query("TS_trip_status>0 & TS_trip_status<1")\
        .groupby("Route_route_short_name")\
        .Trip_trip_id\
        .count()
    rolling_trips_on_line.name = "TS_rolling_trips_on_line"
    self.df = self.df\
        .join(rolling_trips_on_line, on="Route_route_short_name")
    self.rolling_trips_on_line = rolling_trips_on_line
def _compute_labels(self):
    """Two main logics:
    - either retroactive: then TripState_expected_delay is real one: label.
    - either realtime (not retroactive): then we don't have real label, but
    we have a api prediction.

    Retroactive:
    Adds two columns:
    - label: observed delay at stop: real one.
    - label_ev: observed delay evolution (difference between observed
    delay predicted stop, and delay at last observed stop)

    Not retroactive: realtime:
    Adds two columns:
    - P_api_pred: predicted delay from api.
    - P_api_pred_ev: predicted evolution (from api) of delay.
    """
    # if stop time really occured, then expected delay (extracted from api)
    # is real one
    self.df.loc[:, "label"] = self.df\
        .query("D_trip_passed_observed_stop==True")\
        .TS_expected_delay

    # Evolution of delay between last observed station and predicted
    # station
    self.df.loc[:, "label_ev"] = self.df\
        .query("D_trip_passed_observed_stop == True")\
        .apply(lambda x: x.label - x["TS_last_observed_delay"], axis=1)
def _compute_api_pred(self):
    """This method provides two predictions if possible:
    - naive pred: delay translation (last observed delay)
    - api prediction (and its implied delay evolution)
    """
    # if not passed: it is the api-prediction
    self.df.loc[:, "P_api_pred"] = self.df\
        .query("D_trip_passed_observed_stop != True")\
        .TS_expected_delay

    # api delay evolution prediction
    # fix: previously computed ``x.label - ...``, but ``label`` is only
    # set for already-passed stops (see _compute_labels), so this column
    # was always NaN on the not-passed rows selected here. The api's
    # predicted evolution is its predicted delay minus the last observed
    # delay.
    self.df.loc[:, "P_api_pred_ev"] = self.df\
        .query("D_trip_passed_observed_stop != True")\
        .apply(lambda x: x["P_api_pred"] - x["TS_last_observed_delay"],
               axis=1)

    # naive prediction: delay stays what it was at the last observed stop
    self.df.loc[:, "P_naive_pred"] = self.df.loc[
        :, "TS_last_observed_delay"]
def _compute_pred_scores(self):
    """
    We can compute score only for stoptimes for which we have real
    information.

    At no point we will be able to have both real information and api pred,
    so we only compute score for naive prediction.

    NAIVE PREDICTION:
    Naive prediction assumes that delay does not evolve:
    - evolution of delay = 0
    - delay predicted = last_observed_delay

    => error = real_delay - naive_pred
             = label - last_observed_delay
             = label_ev

    Scores for navie prediction for delay can be:
    - naive_pred_mae: mean absolute error: |label_ev|
    - naive_pred_mse: mean square error: (label_ev)**2
    """
    # per-row errors; aggregation into mean scores happens downstream
    self.df.loc[:, "S_naive_pred_mae"] = self.df["label_ev"].abs()
    self.df.loc[:, "S_naive_pred_mse"] = self.df["label_ev"]**2
def stats(self):
    """Print the day-level summary, then (when the per-time state exists)
    a detailed summary of rolling trips and predictable stoptimes."""
    DayMatrixBuilder.stats(self)
    if not self._state_at_time_computed:
        # direct_compute_for_time() has not been called yet
        return None

    message = """
    SUMMARY FOR DAY %(day)s AT TIME %(time)s

    TRIPS
    Number of trips today: %(trips_today)s
    Number of trips currently rolling: %(trips_now)s (these are the trips for which we will try to make predictions)
    Number of trips currently rolling for wich we observed at least one stop: %(trips_now_observed)s

    STOPTIMES
    Number of stop times that day: %(stoptimes_today)s
    - Passed:
        - scheduled: %(stoptimes_passed)s
        - observed: %(stoptimes_passed_observed)s
    - Not passed yet:
        - scheduled: %(stoptimes_not_passed)s
        - observed (predictions on boards) %(stoptimes_not_passed_observed)s

    STOPTIMES FOR ROLLING TRIPS
    Total number of stops for rolling trips: %(stoptimes_now)s
    - Passed: those we will use to make our prediction
        - scheduled: %(stoptimes_now_passed)s
        - observed: %(stoptimes_now_passed_observed)s
    - Not passed yet: those for which we want to make a prediction
        - scheduled: %(stoptimes_now_not_passed)s
        - already observed on boards (prediction): %(stoptimes_now_not_passed_observed)s

    PREDICTIONS
    Number of stop times for which we want to make a prediction (not passed yet): %(stoptimes_now_not_passed)s
    Number of trips currently rolling for wich we observed at least one stop: %(trips_now_observed)s
    Representing %(stoptimes_predictable)s stop times for which we can provide a prediction.

    LABELED
    Given that retroactive is %(retroactive)s, we have %(stoptimes_predictable_labeled)s labeled to_predict stoptimes for training.
    """
    # NOTE(review): the template references %(retroactive)s but the dict
    # below defines no "retroactive" key -- printing would raise KeyError;
    # confirm against the full class
    self.summary = {
        "day": self.day,
        "time": self.time,
        "trips_today": len(self.df.Trip_trip_id.unique()),
        "trips_now": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1)")
        .Trip_trip_id.unique().shape[0],
        "trips_now_observed": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) & (TS_sequence_diff.notnull())")
        .Trip_trip_id.unique().shape[0],
        "stoptimes_today": self.df.Trip_trip_id.count(),
        "stoptimes_passed": self.df.TS_trip_passed_scheduled_stop.sum(),
        "stoptimes_passed_observed": self
        .df.TS_trip_passed_observed_stop.sum(),
        "stoptimes_not_passed": (~self.df.TS_trip_passed_scheduled_stop).sum(),
        "stoptimes_not_passed_observed":
            (self.df.TS_trip_passed_observed_stop == False).sum(),
        "stoptimes_now": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1)")
        .Trip_trip_id.count(),
        "stoptimes_now_passed": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==True)")
        .Trip_trip_id.count(),
        "stoptimes_now_passed_observed": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_observed_stop==True)")
        .Trip_trip_id.count(),
        "stoptimes_now_not_passed": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False)")
        .Trip_trip_id.count(),
        "stoptimes_now_not_passed_observed": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_observed_stop==False)")
        .Trip_trip_id.count(),
        "stoptimes_predictable": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False) &"
               " (TS_sequence_diff.notnull())")
        .Trip_trip_id.count(),
        "stoptimes_predictable_labeled": self.df
        .query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False) &"
               " (TS_sequence_diff.notnull()) &(label.notnull())")
        .Trip_trip_id.count(),
    }
    print(message % self.summary)
def get_predictable(self,
                    all_features_required=True,
                    labeled_only=True,
                    col_filter_level=2,
                    split_datasets=False,
                    set_index=True,
                    provided_df=None):
    """Return the stop times that can still be predicted.

    :param all_features_required: keep only rows where every feature column is set
    :param labeled_only: keep only rows that have a label
    :param col_filter_level: 0 = all columns, 1 = useful columns, 2 = strict minimum
    :param split_datasets: return a dict of X / y frames instead of one frame
    :param set_index: copy the id columns into the index
    :param provided_df: optional DataFrame to use instead of self.df
    """
    assert self._state_at_time_computed
    base_df = provided_df if isinstance(provided_df, pd.DataFrame) else self.df
    # Basic conditions for a predictable stop time:
    # - trip_status strictly between 0 and 1 (trip currently running),
    # - stop not passed yet on schedule (not True),
    # - stop not passed yet in realtime ("!= True" keeps NaN and False).
    result = base_df.query(
        "TS_trip_status < 1 & TS_trip_status > 0 &"
        " TS_trip_passed_scheduled_stop != True &"
        " TS_trip_passed_observed_stop != True")
    if all_features_required:
        # Keep only rows that have every feature available.
        for feature in self._feature_cols:
            result = result.query("%s.notnull()" % feature)
    if labeled_only:
        result = result.query("label.notnull()")
    if set_index:
        result = self._df_set_index(result)
    if col_filter_level:
        # a falsy level (0) means no filtering: keep all columns
        result = self._df_filter_cols(result, col_filter_level=col_filter_level)
    if split_datasets:
        # result becomes a dict of DataFrames
        result = self._split_datasets(result)
    logger.info("Predictable with labeled_only=%s, has a total of %s rows."
                % (labeled_only, len(result)))
    return result
def _df_filter_cols(self, rdf, col_filter_level):
# We need at least: index, features, and label
filtered_cols = self._feature_cols\
+ self._id_cols\
+ self._label_cols\
+ self._prediction_cols\
+ self._scoring_cols
if col_filter_level == 2:
# high filter: only necessary fields
return rdf[filtered_cols]
elif col_filter_level == 1:
# medium filter: add some useful cols
filtered_cols += self._other_useful_cols
return rdf[filtered_cols]
else:
raise ValueError("col_filter_level must be 0, 1 or 2")
def _df_set_index(self, rdf):
# copy columns so that it is available as value or index
# value columns are then filtered
assert isinstance(rdf, pd.DataFrame)
index_suffix = "_ix"
rdf.reset_index()
for col in self._id_cols:
rdf[col + index_suffix] = rdf[col]
new_ix = list(map(lambda x: x + index_suffix, self._id_cols))
rdf.set_index(new_ix, inplace=True)
return rdf
def _split_datasets(self, rdf):
res = {
"X": rdf[self._feature_cols],
"y_real": rdf[self._label_cols],
"y_pred": rdf[self._prediction_cols],
"y_score": rdf[self._scoring_cols]
}
return res
def compute_multiple_times_of_day(self, begin="00:00:00", end="23:59:00", min_diff=60, flush_former=True, **kwargs):
    """Compute and concatenate dataframes for several times of day.

    Default: begins at 00:00:00 and ends at 23:59:00 (inclusive) with a
    step of one hour.

    :param begin: first time of day, "HH:MM:SS"
    :param end: last time of day, "HH:MM:SS"
    :param min_diff: step between two computations, in minutes
    :param flush_former: drop previously accumulated results first
    """
    assert isinstance(min_diff, int)
    # strptime raises ValueError on a badly formatted time string
    begin_dt = datetime.strptime(begin, "%H:%M:%S")
    end_dt = datetime.strptime(end, "%H:%M:%S")
    if flush_former:
        self._flush_result_concat()
    step = timedelta(minutes=min_diff)
    current = begin_dt
    while current <= end_dt:
        time_of_day = current.strftime("%H:%M:%S")
        self.direct_compute_for_time(time_of_day)
        self._concat_dataframes(self.get_predictable(**kwargs))
        current += step
    return self.result_concat
def _concat_dataframes(self, df):
assert isinstance(df, pd.DataFrame)
# if no former result df, create empty df
if not hasattr(self, "result_concat"):
self.result_concat = pd.DataFrame()
# concat with previous results
self.result_concat = pd.concat([self.result_concat, df])
def _flush_result_concat(self):
    # Reset the accumulator used by _concat_dataframes to an empty frame.
    self.result_concat = pd.DataFrame()
class RecursivePredictionMatrix(DayMatrixBuilder):
    """
    NOT COMPLETED NOR USED FOR NOW
    """

    def __init__(self, day=None, df=None):
        DayMatrixBuilder.__init__(self, day=day, df=df)

    def compute_all_possibles_sets(self):
        """Given the data obtained from schedule and realtime, this method will
        compute data sets for recursive prediction.
        Recursive predictions (to the contrary of direct predictions) are
        relatively time agnostic. They primarily depend on previous stops.
        The elements to be computed are:
        - R_trip_previous_station_delay: the train previous stop delay:
        -- will only accept previous stop
        - R_previous_trip_last_station_delay: the forward train last estimated stop delay: difficult to compute?
        -- RS_data_freshness
        - R_: make a score of route section blocking potential
        """
        self.df = self._initial_df.copy()
        self._trip_previous_station()

    def _trip_previous_station(self):
        # Sequence number of the stop preceding each row's stop within its
        # trip; rows whose stop_sequence is 0 get NaN via index alignment.
        self.df.loc[:, "R_previous_station_sequence"] = self.df\
            .query("StopTime_stop_sequence>0")\
            .StopTime_stop_sequence - 1
        # Lookup table of delay and scheduled departure keyed by
        # (trip id, stop sequence); used by the join below.
        previous_station = self.df\
            .set_index(["Trip_trip_id", "StopTime_stop_sequence"])\
            .loc[:, ["D_trip_delay", "StopTime_departure_time"]]\
            .dropna()
        # Attach each row's previous-station columns (suffixed
        # "_previous_station"); rows with no previous station get NaN
        # through the left join.
        self.df = self.df\
            .join(
                previous_station,
                on=["Trip_trip_id", "R_previous_station_sequence"],
                how="left", rsuffix="_previous_station"
            )
class TripVizBuilder(DayMatrixBuilder):
    # Matrix builder specialized for trip visualization; the annotation
    # method below is a stub.

    def __init__(self, day=None, df=None):
        DayMatrixBuilder.__init__(self, day=day, df=df)

    def annote_for_route_section(self, passes_by_all=None, passes_by_one=None):
        """
        Adds a column: stop custom sequence. It represents the station sequence
        on the given route, on the given section.
        Trip directions will be separated.
        Filters trips passing by chosen stations.
        To compute custom route sequence, we have to assign to each relevant
        stop_id a sequence number.
        Ideally we would like to select by stop_name, but they are not unique.
        :param passes_by_all: stations every kept trip must pass by
        :param passes_by_one: stations of which a kept trip must pass by at least one
        """
        # Not implemented yet.
        pass
class TrainingSetBuilder:
    """Build per-day training sets over a date range and persist them
    locally (and optionally to S3)."""

    def __init__(self, start, end, tempo=30):
        """
        Start and end included.
        :param start: first day of the range
        :param end: last day of the range
        :param tempo: sampling step in minutes between two times of day
        """
        date_index = pd.date_range(start=start, end=end, freq="D")
        self.days = date_index.map(lambda d: d.strftime("%Y%m%d")).tolist()
        assert isinstance(tempo, int)
        self.tempo = tempo
        self.bucket_name = __S3_BUCKETS__["training-sets"]
        self._bucket_provider = S3Bucket(
            self.bucket_name,
            create_if_absent=True
        )

    def _create_day_training_set(self, day, save_s3):
        # Build the full matrix for the day, sampled every `tempo` minutes.
        mat = DirectPredictionMatrix(day)
        mat.compute_multiple_times_of_day(min_diff=self.tempo)
        training_set_folder = __TRAINING_SET_FOLDER_PATH__ % self.tempo
        # Ensure both destination folders exist (raw first, then training).
        for folder in (__RAW_DAYS_FOLDER_PATH__, training_set_folder):
            if not path.exists(folder):
                makedirs(folder)
        raw_file_path = path.join(__RAW_DAYS_FOLDER_PATH__, "%s.pickle" % day)
        training_set_file_path = path.join(training_set_folder, "%s.pickle" % day)
        logger.info("Saving data in %s." % __RAW_DAYS_FOLDER_PATH__)
        mat._initial_df.to_pickle(raw_file_path)
        mat.result_concat.to_pickle(training_set_file_path)
        if save_s3:
            # Mirror both files to the bucket, keyed relative to the data root.
            for local_path in (raw_file_path, training_set_file_path):
                self._bucket_provider.send_file(
                    file_local_path=local_path,
                    file_remote_path=path.relpath(local_path, __DATA_PATH__)
                )

    def create_training_sets(self, save_s3=True):
        for day in self.days:
            self._create_day_training_set(day=day, save_s3=save_s3)
# TODO
# A - take into account trip direction when computing delays on line
# DONE: B - handle cases where no realtime information is found
# DONE: C - when retroactive is False, give api prediction as feature
# D - ability to save results
# E - investigate missing values/stations
# F - perform trip_id train_num comparison on RATP lines
# G - improve speed by having an option to make computations only on running
# trains
| mit |
scikit-hep/uproot | uproot3/tree.py | 1 | 103142 | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
from __future__ import absolute_import
import base64
import codecs
import glob
import importlib
import inspect
import itertools
import math
import numbers
import os
import re
import struct
import sys
import threading
from collections import namedtuple
from collections import OrderedDict
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import numpy
import cachetools
import awkward0
import uproot_methods.profiles
import uproot3.rootio
from uproot3.rootio import _bytesid
from uproot3.rootio import _memsize
from uproot3.rootio import nofilter
from uproot3.rootio import _safename
from uproot3.interp.auto import interpret
from uproot3.interp.numerical import asdtype
from uproot3.interp.jagged import asjagged
from uproot3.interp.objects import asobj
from uproot3.interp.objects import asgenobj
from uproot3.source.cursor import Cursor
from uproot3.source.memmap import MemmapSource
from uproot3.source.xrootd import XRootDSource
from uproot3.source.http import HTTPSource
# Text types differ between Python 2 and 3; string_types is the tuple of
# types accepted wherever a "string" is expected in this module.
if sys.version_info[0] <= 2:
    string_types = (unicode, str)
else:
    string_types = (str, bytes)
def _delayedraise(excinfo):
if excinfo is not None:
cls, err, trc = excinfo
if sys.version_info[0] <= 2:
exec("raise cls, err, trc")
else:
raise err.with_traceback(trc)
def _filename_explode(x):
    """Expand a (possibly path-like, possibly globbed) filename into a
    sorted list of matching paths; non-local URLs pass through untouched."""
    # Normalize path-like objects to plain strings first.
    if isinstance(x, getattr(os, "PathLike", ())):
        x = os.fspath(x)
    elif hasattr(x, "__fspath__"):
        x = x.__fspath__()
    elif x.__class__.__module__ == "pathlib":
        import pathlib
        if isinstance(x, pathlib.Path):
            x = str(x)
    parsed = urlparse(x)
    windows_absolute = os.name == "nt" and _filename_explode._windows_absolute.match(x) is not None
    is_local = _bytesid(parsed.scheme) == b"file" or len(parsed.scheme) == 0 or windows_absolute
    if not is_local:
        # remote URL (xrootd/http/...): return as-is
        return [x]
    # A Windows drive prefix like "C:\" would be mangled by urlparse,
    # so use the raw string in that case.
    path = x if windows_absolute else parsed.netloc + parsed.path
    pattern = os.path.expanduser(path)
    if any(ch in pattern for ch in "*?["):
        matches = sorted(glob.glob(pattern))
        if len(matches) == 0:
            raise TypeError("no matches for filename {0}".format(repr(pattern)))
        return matches
    return [pattern]
_filename_explode._windows_absolute = re.compile(r"^[A-Za-z]:\\")
def _normalize_awkwardlib(awkwardlib):
if awkwardlib is None:
return awkward0
elif isinstance(awkwardlib, str):
return importlib.import_module(awkwardlib)
else:
return awkwardlib
def _normalize_entrystartstop(numentries, entrystart, entrystop):
if entrystart is None:
entrystart = 0
elif entrystart < 0:
entrystart += numentries
entrystart = min(numentries, max(0, entrystart))
if entrystop is None:
entrystop = numentries
elif entrystop < 0:
entrystop += numentries
entrystop = min(numentries, max(0, entrystop))
if entrystop < entrystart:
raise IndexError("entrystop must be greater than or equal to entrystart")
return int(entrystart), int(entrystop)
################################################################ high-level interface
def iterate(path, treepath, branches=None, entrysteps=float("inf"), outputtype=dict, namedecode=None, reportpath=False, reportfile=False, reportentries=False, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
    """Iterate over chunks of arrays read from the tree named *treepath* in
    each file matched by *path*, optionally prefixing each yielded chunk
    with the originating path, file object, and global entry range."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    for tree, branchesinterp, globalentrystart, thispath, thisfile in _iterate(path, treepath, branches, awkward0, localsource, xrootdsource, httpsource, **options):
        for start, stop, arrays in tree.iterate(branches=branchesinterp, entrysteps=entrysteps, outputtype=outputtype, namedecode=namedecode, reportentries=True, entrystart=0, entrystop=tree.numentries, flatten=flatten, flatname=flatname, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=blocking):
            # For pandas DataFrame output, shift the index in place so entry
            # numbers are global across all files rather than per-file.
            # The attribute access differs across pandas versions, hence
            # the hasattr checks below.
            if getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame":
                if type(arrays.index).__name__ == "MultiIndex":
                    if hasattr(arrays.index.levels[0], "array"):
                        index = arrays.index.levels[0].array  # pandas>=0.24.0
                    else:
                        index = arrays.index.levels[0].values  # pandas<0.24.0
                    awkward0.numpy.add(index, globalentrystart, out=index)
                elif type(arrays.index).__name__ == "RangeIndex":
                    if hasattr(arrays.index, "start") and hasattr(arrays.index, "stop"):
                        indexstart = arrays.index.start  # pandas>=0.25.0
                        indexstop = arrays.index.stop
                    else:
                        indexstart = arrays.index._start  # pandas<0.25.0
                        indexstop = arrays.index._stop
                    arrays.index = type(arrays.index)(indexstart + globalentrystart, indexstop + globalentrystart)
                else:
                    if hasattr(arrays.index, "array"):
                        index = arrays.index.array  # pandas>=0.24.0
                    else:
                        index = arrays.index.values  # pandas<0.24.0
                    awkward0.numpy.add(index, globalentrystart, out=index)
            # Assemble the yielded value according to the report* flags;
            # a bare arrays object is yielded when no report is requested.
            out = (arrays,)
            if reportentries:
                out = (globalentrystart + start, globalentrystart + stop) + out
            if reportfile:
                out = (thisfile,) + out
            if reportpath:
                out = (thispath,) + out
            if len(out) == 1:
                yield out[0]
            else:
                yield out
def _iterate(path, treepath, branches, awkward0, localsource, xrootdsource, httpsource, **options):
    """Open each matching file in turn and yield
    (tree, interpretations, global entry offset, path, file);
    files that do not contain *treepath* are skipped."""
    if isinstance(path, string_types):
        paths = _filename_explode(path)
    else:
        paths = [expanded for item in path for expanded in _filename_explode(item)]
    globalentrystart = 0
    for filepath in paths:
        rootfile = uproot3.rootio.open(filepath, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
        try:
            tree = rootfile[treepath]
        except KeyError:
            # this file has no tree of that name; skip it
            continue
        branchesinterp = OrderedDict()
        for branch, interpretation in tree._normalize_branches(branches, awkward0):
            branchesinterp[branch.name] = interpretation
        yield tree, branchesinterp, globalentrystart, filepath, rootfile
        globalentrystart += tree.numentries
################################################################ methods for TTree
class TTreeMethods(object):
    # makes __doc__ attribute mutable before Python 3.3
    __metaclass__ = type.__new__(type, "type", (uproot3.rootio.ROOTObject.__metaclass__,), {})

    # NOTE(review): presumably tells the rootio machinery to copy the read
    # context for objects of this class — confirm against rootio.
    _copycontext = True

    # matches class names like b"vector<T>"; group(1) captures T
    _vector_regex = re.compile(b"^vector<(.+)>$")
    # matches a trailing "(ClassName)" at the end of a title string
    _objectpointer_regex = re.compile(br"\(([^()]*)\)$")
def _attachstreamer(self, branch, streamer, streamerinfosmap, isTClonesArray):
    """Attach the appropriate streamer (deserialization recipe) to *branch*
    and, recursively, to its subbranches, resolving TClonesArray, STL
    vector, and object-typed streamers through *streamerinfosmap*."""
    if streamer is None:
        # No streamer given: try to derive one from the branch's class name,
        # either directly by name or as vector<T> with T's streamer.
        m = re.match(self._vector_regex, getattr(branch, "_fClassName", b""))
        if m is None:
            if branch.name in streamerinfosmap:
                streamer = streamerinfosmap[branch.name]
            else:
                return
        else:
            if m.group(1) in streamerinfosmap:
                substreamer = streamerinfosmap[m.group(1)]
                if isinstance(substreamer, uproot3.rootio.TStreamerInfo):
                    streamer = uproot3.rootio.TStreamerSTL.vector(None, substreamer._fName)
                else:
                    streamer = uproot3.rootio.TStreamerSTL.vector(substreamer._fType, substreamer._fTypeName)
            else:
                return
    if isinstance(streamer, uproot3.rootio.TStreamerInfo):
        if len(streamer._fElements) == 1 and isinstance(streamer._fElements[0], uproot3.rootio.TStreamerBase) and streamer._fElements[0]._fName == b"TObjArray":
            if streamer._fName == b"TClonesArray":
                # recurse with the streamer of the cloned class instead
                return self._attachstreamer(branch, streamerinfosmap.get(branch._fClonesName, None), streamerinfosmap, True)
            else:
                # FIXME: can only determine streamer by reading some values?
                return
        elif len(streamer._fElements) == 1 and isinstance(streamer._fElements[0], uproot3.rootio.TStreamerSTL) and streamer._fElements[0]._fName == b"This":
            return self._attachstreamer(branch, streamer._fElements[0], streamerinfosmap, isTClonesArray)
    if isinstance(streamer, uproot3.rootio.TStreamerObject):
        if streamer._fTypeName == b"TClonesArray":
            return self._attachstreamer(branch, streamerinfosmap.get(branch._fClonesName, None), streamerinfosmap, True)
        else:
            return self._attachstreamer(branch, streamerinfosmap.get(streamer._fTypeName, None), streamerinfosmap, True)
    branch._streamer = streamer
    branch._isTClonesArray = isTClonesArray
    if isinstance(streamer, uproot3.rootio.TStreamerSTL) and streamer._fSTLtype == uproot3.const.kSTLvector:
        # for vector<T>, also attach T's streamer as the element streamer
        branch._vecstreamer = streamerinfosmap.get(re.match(self._vector_regex, streamer._fTypeName).group(1), None)
        isTClonesArray = True
    else:
        branch._vecstreamer = None
    digDeeperTypes = (uproot3.rootio.TStreamerObject, uproot3.rootio.TStreamerObjectAny, uproot3.rootio.TStreamerObjectPointer, uproot3.rootio.TStreamerObjectAnyPointer)
    # Work out the member dictionary of the streamed type, if any, so that
    # subbranches can be matched against member names below.
    members = None
    if isinstance(streamer, uproot3.rootio.TStreamerInfo):
        members = streamer.members
    elif isinstance(streamer, digDeeperTypes):
        typename = streamer._fTypeName.rstrip(b"*")
        if typename in streamerinfosmap:
            m = self._objectpointer_regex.search(streamer._fTitle)
            if typename == b'TClonesArray' and m is not None:
                # a "(ClassName)" suffix in the title names the cloned class
                typename = m.group(1)
            members = streamerinfosmap[typename].members
    elif isinstance(streamer, uproot3.rootio.TStreamerSTL):
        try:
            # FIXME: string manipulation only works for one-parameter templates
            typename = streamer._fTypeName[streamer._fTypeName.index(b"<") + 1 : streamer._fTypeName.rindex(b">")].rstrip(b"*")
        except ValueError:
            pass
        else:
            if typename in streamerinfosmap:
                members = streamerinfosmap[typename].members
    if members is not None:
        for subbranch in branch.itervalues(recursive=True):
            name = subbranch._fName
            if name.startswith(branch._fName + b"."):  # drop parent branch's name
                name = name[len(branch._fName) + 1:]
            submembers = members
            while True:  # drop nested struct names one at a time
                try:
                    index = name.index(b".")
                except ValueError:
                    break
                else:
                    base, name = name[:index], name[index + 1:]
                    if base in submembers and isinstance(submembers[base], digDeeperTypes):
                        key = submembers[base]._fTypeName.rstrip(b"*")
                        try:
                            submembers = streamerinfosmap[key].members
                        except KeyError:
                            # unknown type name: try the configured alias
                            # substitutions before giving up
                            for regex, substitution in uproot3.interp.auto.streamer_aliases:
                                new_key, n_matched = regex.subn(substitution, key)
                                if n_matched:
                                    submembers = streamerinfosmap[new_key].members
                                    self._context.classes[_safename(key)] = self._context.classes[_safename(new_key)]
                                    break
                            else:
                                raise
            try:
                # strip any "[...]" array suffix from the member name
                name = name[:name.index(b"[")]
            except ValueError:
                pass
            self._attachstreamer(subbranch, submembers.get(name, None), streamerinfosmap, isTClonesArray)
def _addprovenance(self, branch, context, parents=None):
    """Record on each branch the chain of ancestor names that leads to it,
    starting from the tree name."""
    if parents is None:
        parents = [context.treename]
    if not branch._provenance:
        branch._provenance = parents
    for child in branch.itervalues():
        child._provenance = parents + [branch.name]
        self._addprovenance(child, context, child._provenance)
def _postprocess(self, source, cursor, context, parent):
    """Finish setting up the TTree after deserialization: attach streamers
    and provenance to every branch, build the name->branch lookup table,
    link counter leaves to their counter branches, and collect aliases."""
    self._context = context
    self._context.treename = self.name
    self._context.speedbump = True
    for branch in self._fBranches:
        self._attachstreamer(branch, context.streamerinfosmap.get(getattr(branch, "_fClassName", None), None), context.streamerinfosmap, False)
        self._addprovenance(branch, context)
    self._branchlookup = {}
    self._fill_branchlookup(self._branchlookup)
    # Map each single leaf (by object id) back to its branch so that a
    # branch whose first leaf carries a leaf-count can locate the
    # corresponding count branch.
    leaf2branch = {}
    for branch in self.itervalues(recursive=True):
        if len(branch._fLeaves) == 1:
            leaf2branch[id(branch._fLeaves[0])] = branch
    for branch in self.itervalues(recursive=True):
        if len(branch._fLeaves) > 0:
            branch._countleaf = branch._fLeaves[0]._fLeafCount
            if branch._countleaf is not None:
                branch._countbranch = leaf2branch.get(id(branch._countleaf), None)
    # alias name -> title mapping (empty when the tree defines no aliases)
    if getattr(self, "_fAliases", None) is None:
        self.aliases = {}
    else:
        self.aliases = dict((alias._fName, alias._fTitle) for alias in self._fAliases)
def _fill_branchlookup(self, branchlookup):
    """Recursively register every branch and subbranch in *branchlookup*,
    keyed by branch name."""
    for child in self._fBranches:
        child._fill_branchlookup(branchlookup)
        branchlookup[child.name] = child
@property
def name(self):
    # the tree's underlying fName field
    return self._fName
@property
def title(self):
    # the tree's underlying fTitle field
    return self._fTitle
@property
def numentries(self):
    # number of entries in the tree, coerced to a plain int
    return int(self._fEntries)
@property
def numbranches(self):
    """Number of branches, counted recursively through all subbranches."""
    return sum(1 for _ in self.itervalues(recursive=True))
def iterkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
    """Yield branch names; all filtering is delegated to iteritems."""
    for name, _branch in self.iteritems(recursive, filtername, filtertitle, aliases):
        yield name
def itervalues(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """Yield branch objects; all filtering is delegated to iteritems."""
    for _name, branch in self.iteritems(recursive, filtername, filtertitle, aliases=False):
        yield branch
def iteritems(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
    """Yield (name, branch) pairs for the branches of this tree, applying
    the name/title filters; with recursive='/' the yielded names of
    subbranches are slash-qualified with their parents' names."""
    for branch in self._fBranches:
        branch_name = branch.name
        if aliases:
            # substitute the aliased name when one is defined
            branch_name = self.aliases.get(branch_name, branch_name)
        if filtername(branch_name) and filtertitle(branch.title):
            yield branch_name, branch
        if recursive:
            try:
                iterator = branch.iteritems(recursive, filtername, filtertitle, aliases=aliases)
            except TypeError:
                # Probably unknown `aliases` parameter
                # Try without
                iterator = branch.iteritems(recursive, filtername, filtertitle)
            for n, b in iterator:
                if recursive == '/':
                    n = branch_name + b'/' + n
                yield n, b
def keys(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
    # eager list of branch names; see iterkeys for the generator version
    return list(self.iterkeys(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases))
def _ipython_key_completions_(self):
    "Support for completion of keys in an IPython kernel"
    return [key.decode("ascii") for key in self.iterkeys()]
def values(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    # eager list of branch objects; see itervalues for the generator version
    return list(self.itervalues(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def items(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
    # eager list of (name, branch) pairs; see iteritems for the generator version
    return list(self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases))
def allkeys(self, filtername=nofilter, filtertitle=nofilter, aliases=True):
    # shorthand for keys(recursive=True)
    return self.keys(recursive=True, filtername=filtername, filtertitle=filtertitle, aliases=aliases)
def allvalues(self, filtername=nofilter, filtertitle=nofilter):
    # shorthand for values(recursive=True)
    return self.values(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allitems(self, filtername=nofilter, filtertitle=nofilter, aliases=True):
    # shorthand for items(recursive=True)
    return self.items(recursive=True, filtername=filtername, filtertitle=filtertitle, aliases=aliases)
def _get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter, aliases=True):
    # Slow path behind get(): linear scan through iteritems, caching the
    # match in _branchlookup for subsequent lookups.
    if b'/' in name:
        # Look for exact subbranch: slash-qualified iteration yields
        # parent/child names that can match the requested path.
        recursive = '/'
    for n, b in self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases):
        if n == name:
            self._branchlookup[name] = b
            return b
    raise uproot3.rootio._KeyError("not found: {0}\n in file: {1}".format(repr(name), self._context.sourcepath))
def get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter, aliases=True):
    """Look up a branch by name, serving from the cached lookup table when
    possible and falling back to a full search otherwise."""
    key = _bytesid(name)
    if key in self._branchlookup:
        return self._branchlookup[key]
    return self._get(key, recursive, filtername, filtertitle, aliases)
def __contains__(self, name):
    """True exactly when get(name) succeeds."""
    try:
        self.get(name)
        return True
    except KeyError:
        return False
def mempartitions(self, numbytes, branches=None, entrystart=None, entrystop=None, keycache=None, linear=True):
    """Yield (start, stop) entry ranges sized so that each range reads
    roughly *numbytes* (a number or a memory-size string like "100 MB")
    of basket data for the selected branches."""
    m = _memsize(numbytes)
    if m is not None:
        # numbytes was a memory-size string; use the parsed byte count
        numbytes = m
    if numbytes <= 0:
        raise ValueError("target numbytes must be positive")
    awkward0 = _normalize_awkwardlib(None)
    branches = list(self._normalize_branches(branches, awkward0))
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    if not linear:
        raise NotImplementedError("non-linear mempartition has not been implemented")
    # Sum the bytes of every basket overlapping [entrystart, entrystop),
    # pro-rated by the fraction of the basket's entries that overlap.
    relevant_numbytes = 0.0
    for branch, interpretation in branches:
        if branch._recoveredbaskets is None:
            branch._tryrecover()
        for i, key in enumerate(branch._threadsafe_iterate_keys(keycache, False)):
            start, stop = branch._entryoffsets[i], branch._entryoffsets[i + 1]
            if entrystart < stop and start < entrystop:
                this_numbytes = key._fObjlen * (min(stop, entrystop) - max(start, entrystart)) / float(stop - start)
                assert this_numbytes >= 0.0
                relevant_numbytes += this_numbytes
    # Entries per partition so that each partition covers ~numbytes of
    # the total; at least one entry per step.
    entrysteps = max(1, int(round(math.ceil((entrystop - entrystart) * numbytes / relevant_numbytes))))
    start, stop = entrystart, entrystart
    while stop < entrystop:
        stop = min(stop + entrysteps, entrystop)
        if stop > start:
            yield start, stop
        start = stop
def clusters(self, branches=None, entrystart=None, entrystop=None, strict=False):
    """Yield (start, stop) entry ranges at which the baskets of all
    selected branches align ("clusters"); with strict=True only clusters
    fully inside the requested bounds are yielded."""
    awkward0 = _normalize_awkwardlib(None)
    branches = list(self._normalize_branches(branches, awkward0))

    # convenience class; simplifies presentation of the algorithm
    class BranchCursor(object):
        def __init__(self, branch):
            self.branch = branch
            self.basketstart = 0
            self.basketstop = 0
        @property
        def entrystart(self):
            return self.branch.basket_entrystart(self.basketstart)
        @property
        def entrystop(self):
            return self.branch.basket_entrystop(self.basketstop)

    cursors = [BranchCursor(branch) for branch, interpretation in branches if branch.numbaskets > 0]
    if len(cursors) == 0:
        # no baskets anywhere: the whole (normalized) range is one cluster
        yield _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    else:
        # everybody starts at the same entry number; if there is no such place before someone runs out of baskets, there will be an exception
        leadingstart = max(cursor.entrystart for cursor in cursors)
        while not all(cursor.entrystart == leadingstart for cursor in cursors):
            for cursor in cursors:
                while cursor.entrystart < leadingstart:
                    cursor.basketstart += 1
                    cursor.basketstop += 1
            leadingstart = max(cursor.entrystart for cursor in cursors)
        entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
        # move all cursors forward, yielding a (start, stop) pair if their baskets line up
        while any(cursor.basketstop < cursor.branch.numbaskets for cursor in cursors):
            # move all subleading baskets forward until they are no longer subleading
            leadingstop = max(cursor.entrystop for cursor in cursors)
            for cursor in cursors:
                while cursor.entrystop < leadingstop:
                    cursor.basketstop += 1
            # if they all line up, this is a good cluster
            if all(cursor.entrystop == leadingstop for cursor in cursors):
                # check to see if it's within the bounds the user requested (strictly or not strictly)
                if strict:
                    if entrystart <= leadingstart and leadingstop <= entrystop:
                        yield leadingstart, leadingstop
                else:
                    if entrystart < leadingstop and leadingstart < entrystop:
                        yield leadingstart, leadingstop
                # anyway, move all the starts to the new stopping position and move all stops forward by one
                leadingstart = leadingstop
                for cursor in cursors:
                    cursor.basketstart = cursor.basketstop
                    cursor.basketstop += 1
            # stop iterating if we're past all acceptable clusters
            if leadingstart >= entrystop:
                break
def array(self, branch, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
    """Read exactly one branch as an array; delegates to TBranch.array.
    An explicit *interpretation* overrides the branch's default."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    matches = list(self._normalize_branches(branch, awkward0))
    if len(matches) != 1:
        raise ValueError("list of branch names or glob/regex matches more than one branch; use TTree.arrays (plural)")
    tbranch, branch_interpretation = matches[0]
    if interpretation is None:
        interpretation = branch_interpretation
    return tbranch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=blocking)
def arrays(self, branches=None, outputtype=dict, namedecode=None, entrystart=None, entrystop=None, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True, recursive=True):
    """Read several branches at once and package them as *outputtype*
    (dict, namedtuple, pandas DataFrame, list/tuple, or any callable);
    with blocking=False a zero-argument function producing the result is
    returned instead."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    branches = list(self._normalize_branches(branches, awkward0))
    for branch, interpretation in branches:
        if branch._recoveredbaskets is None:
            branch._tryrecover()
    if flatten is None:
        # flatten=None means: silently drop jagged branches entirely
        branches = [(branch, interpretation) for branch, interpretation in branches if not isinstance(interpretation, asjagged)]
        flatten = False

    # for the case of outputtype == pandas.DataFrame, do some preparation to fill DataFrames efficiently
    ispandas = getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame"

    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)

    # start the job of filling the arrays
    futures = None
    if recursive and recursive is not True:
        # recursive is a string separator: join provenance into the name
        def wrap_name(branch, namedecode):
            if len(branch._provenance) != 0:
                if namedecode is None:
                    return recursive.join(branch._provenance + [branch.name])
                else:
                    return recursive.join([p.decode(namedecode) for p in (branch._provenance + [branch.name])])
            else:
                return branch.name if namedecode is None else branch.name.decode(namedecode)
        futures = [(wrap_name(branch, namedecode), interpretation, branch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=(flatten and not ispandas), awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=False)) for branch, interpretation in branches]
    else:
        futures = [(branch.name if namedecode is None else branch.name.decode(namedecode), interpretation, branch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=(flatten and not ispandas), awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=False)) for branch, interpretation in branches]

    # make functions that wait for the filling job to be done and return the right outputtype
    if outputtype == namedtuple:
        outputtype = namedtuple("Arrays", [codecs.ascii_decode(branch.name, "replace")[0] if namedecode is None else branch.name.decode(namedecode) for branch, interpretation in branches])
        def wait():
            return outputtype(*[future() for name, interpretation, future in futures])
    elif ispandas:
        import uproot3._connect._pandas
        def wait():
            return uproot3._connect._pandas.futures2df(futures, outputtype, entrystart, entrystop, flatten, flatname, awkward0)
    elif isinstance(outputtype, type) and issubclass(outputtype, dict):
        def wait():
            return outputtype((name, future()) for name, interpretation, future in futures)
    elif isinstance(outputtype, type) and issubclass(outputtype, (list, tuple)):
        def wait():
            return outputtype(future() for name, interpretation, future in futures)
    else:
        def wait():
            return outputtype(*[future() for name, interpretation, future in futures])

    # if blocking, return the result of that function; otherwise, the function itself
    if blocking:
        return wait()
    else:
        return wait
def lazyarray(self, branch, interpretation=None, entrysteps=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
    """Lazily read exactly one branch; delegates to TBranch.lazyarray.
    An explicit *interpretation* overrides the branch's default."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    matches = list(self._normalize_branches(branch, awkward0))
    if len(matches) != 1:
        raise ValueError("list of branch names or glob/regex matches more than one branch; use TTree.lazyarrays (plural)")
    tbranch, branch_interpretation = matches[0]
    if interpretation is None:
        interpretation = branch_interpretation
    return tbranch.lazyarray(interpretation=interpretation, entrysteps=entrysteps, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=persistvirtual, chunked=chunked)
def lazyarrays(self, branches=None, namedecode="utf-8", entrysteps=None, entrystart=None, entrystop=None, flatten=False, profile=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
    """Build an awkward Table of lazily-read (virtual) arrays for the
    selected branches, chunked along *entrysteps* unless chunked=False."""
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    if not chunked and entrysteps is None:
        # unchunked reads use a single step that covers everything
        entrysteps = float('inf')
    entrysteps = list(self._normalize_entrysteps(entrysteps, branches, entrystart, entrystop, keycache))
    awkward0 = _normalize_awkwardlib(awkwardlib)
    branches = list(self._normalize_branches(branches, awkward0))
    for branch, interpretation in branches:
        if branch._recoveredbaskets is None:
            branch._tryrecover()
    # shared generator object that all virtual arrays read through
    lazytree = _LazyTree(self._context.sourcepath, self._context.treename, self, dict((b.name, x) for b, x in branches), flatten, awkward0.__name__, basketcache, keycache, executor)
    out = awkward0.Table()
    for branch, interpretation in branches:
        # Unwrap jagged layers to find the innermost interpretation, which
        # may carry array methods to mix into the VirtualArray class.
        inner = interpretation
        while isinstance(inner, asjagged):
            inner = inner.content
        if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
            VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
        elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
            VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
        else:
            VirtualArray = awkward0.VirtualArray
        name = branch.name.decode("ascii") if namedecode is None else branch.name.decode(namedecode)
        if chunked:
            # one VirtualArray per entry step, assembled into a ChunkedArray
            chunks = []
            counts = []
            for start, stop in entrysteps:
                chunks.append(VirtualArray(lazytree, (branch.name, start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual))
                counts.append(stop - start)
            out[name] = awkward0.ChunkedArray(chunks, counts)
            out[name].__doc__ = branch.title.decode('ascii')
        else:
            # single VirtualArray covering the first (only) entry step
            start, stop = entrysteps[0]
            out[name] = VirtualArray(lazytree, (branch.name, start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual)
            out[name].__doc__ = branch.title.decode('ascii')
    if profile is not None:
        # apply a named profile transformation to the finished table
        out = uproot_methods.profiles.transformer(profile)(out)
    return out
def _normalize_entrysteps(self, entrysteps, branches, entrystart, entrystop, keycache):
    """Turn the user's 'entrysteps' argument into an iterable of (start, stop) entry ranges.

    Accepted forms: a memory-size string, None (cluster boundaries), float('inf')
    (one maximal step), a positive integer step size, or an iterable of 2-tuples.
    """
    # memory-size string ("100 MB", etc.) selects memory-bounded partitions;
    # _memsize returns None for anything that is not such a string
    numbytes = _memsize(entrysteps)
    if numbytes is not None:
        return self.mempartitions(numbytes, branches=branches, entrystart=entrystart, entrystop=entrystop, keycache=keycache, linear=True)
    if isinstance(entrysteps, string_types):
        # a string that did not parse as a memory size is an error
        raise ValueError("string {0} does not match the memory size pattern (number followed by B/kB/MB/GB/etc.)".format(repr(entrysteps)))
    if entrysteps is None:
        # default: step over ROOT cluster boundaries
        return self.clusters(branches, entrystart=entrystart, entrystop=entrystop, strict=False)
    elif entrysteps == float("inf"):
        # a single step covering the whole requested range
        return [(entrystart, min(entrystop, self.numentries))]
    elif isinstance(entrysteps, (numbers.Integral, numpy.integer)):
        entrystepsize = entrysteps
        if entrystepsize <= 0:
            raise ValueError("if an integer, entrysteps must be positive")
        effectivestop = min(entrystop, self.numentries)
        starts = numpy.arange(entrystart, effectivestop, entrystepsize)
        stops = numpy.append(starts[1:], effectivestop)
        return zip(starts, stops)
    else:
        try:
            iter(entrysteps)
        except TypeError:
            raise TypeError("entrysteps must be None for cluster iteration, a positive integer for equal steps in number of entries (inf for maximal), a memory size string (number followed by B/kB/MB/GB/etc.), or an iterable of 2-tuples for explicit entry starts (inclusive) and stops (exclusive)")
        # already an iterable of (start, stop) pairs: pass through unchanged
        return entrysteps
def iterate(self, branches=None, entrysteps=None, outputtype=dict, namedecode=None, reportentries=False, entrystart=None, entrystop=None, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
    """Iterate over entry ranges, yielding one 'outputtype' of arrays per step.

    With reportentries=True, yields (start, stop, arrays). With blocking=False,
    yields zero-argument callables that produce the step's result when called.
    """
    if keycache is None:
        keycache = {}
    if basketcache is None:
        basketcache = {}
        explicit_basketcache = False
    else:
        # caller owns the basket cache: entries must not be evicted behind its back
        explicit_basketcache = True
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    entrysteps = self._normalize_entrysteps(entrysteps, branches, entrystart, entrystop, keycache)
    awkward0 = _normalize_awkwardlib(awkwardlib)
    branches = list(self._normalize_branches(branches, awkward0))
    for branch, interpretation in branches:
        if branch._recoveredbaskets is None:
            branch._tryrecover()
    # for the case of outputtype == pandas.DataFrame, do some preparation to fill DataFrames efficiently
    ispandas = getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame"

    def evaluate(branch, interpretation, future, past, cachekey, pythonize):
        # 'past' is a previously cached result; otherwise finalize the raw 'future'
        if future is None:
            return past
        else:
            out = interpretation.finalize(future(), branch)
            if cache is not None:
                cache[cachekey] = out
            if flatten and isinstance(interpretation, asjagged):
                return out.flatten()
            elif pythonize:
                return list(out)
            else:
                return out

    # choose a wrapper that packages one step's futures into the requested outputtype
    if outputtype == namedtuple:
        outputtype = namedtuple("Arrays", [codecs.ascii_decode(branch.name, "replace")[0] if namedecode is None else branch.name.decode(namedecode) for branch, interpretation in branches])
        def wrap_for_python_scope(futures, start, stop):
            return lambda: outputtype(*[evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures])
    elif ispandas:
        import uproot3._connect._pandas
        def wrap_for_python_scope(futures, start, stop):
            # extra closure level so each future is bound at definition time
            def wrap_again(branch, interpretation, future):
                return lambda: interpretation.finalize(future(), branch)
            return lambda: uproot3._connect._pandas.futures2df([(branch.name, interpretation, wrap_again(branch, interpretation, future)) for branch, interpretation, future, past, cachekey in futures], outputtype, start, stop, flatten, flatname, awkward0)
    elif isinstance(outputtype, type) and issubclass(outputtype, dict):
        def wrap_for_python_scope(futures, start, stop):
            return lambda: outputtype((branch.name if namedecode is None else branch.name.decode(namedecode), evaluate(branch, interpretation, future, past, cachekey, False)) for branch, interpretation, future, past, cachekey in futures)
    elif isinstance(outputtype, type) and issubclass(outputtype, (list, tuple)):
        def wrap_for_python_scope(futures, start, stop):
            return lambda: outputtype(evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures)
    else:
        def wrap_for_python_scope(futures, start, stop):
            return lambda: outputtype(*[evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures])

    for start, stop in entrysteps:
        # clip each step to the requested overall range
        start = max(start, entrystart)
        stop = min(stop, entrystop)
        if start > stop:
            continue
        futures = []
        for branch, interpretation in branches:
            cachekey = branch._cachekey(interpretation, start, stop)
            if branch.numbaskets == 0:
                futures.append((branch, interpretation, interpretation.empty, None, cachekey))
            else:
                basketstart, basketstop = branch._basketstartstop(start, stop)
                basket_itemoffset = branch._basket_itemoffset(interpretation, basketstart, basketstop, keycache)
                basket_entryoffset = branch._basket_entryoffset(basketstart, basketstop)
                if cache is not None:
                    out = cache.get(cachekey, None)
                    if out is not None:
                        futures.append((branch, interpretation, None, out, cachekey))
                        continue
                future = branch._step_array(interpretation, basket_itemoffset, basket_entryoffset, start, stop, awkward0, basketcache, keycache, executor, explicit_basketcache)
                futures.append((branch, interpretation, future, None, cachekey))
        out = wrap_for_python_scope(futures, start, stop)
        if blocking:
            out = out()
        if reportentries:
            yield start, stop, out
        else:
            yield out
def _format(self, indent=""):
    """Collect formatted description lines from all top-level branches."""
    # TODO: add TTree data to the bottom of this
    lines = []
    for subbranch in self._fBranches:
        lines += subbranch._format(indent)
    return lines
def show(self, foldnames=False, stream=sys.stdout):
    """Write a formatted branch summary to 'stream', or return it as a string if stream is None."""
    # NOTE(review): 'foldnames' is forwarded as _format's first positional
    # argument, which in the _format visible in this file is named 'indent'
    # (a string). Confirm the intended _format signature before relying on
    # foldnames here.
    if stream is None:
        return "\n".join(self._format(foldnames))
    else:
        for line in self._format(foldnames):
            stream.write(line)
            stream.write("\n")
def _recover(self):
    """Force basket recovery on every branch of the tree, recursively."""
    for subbranch in self.allvalues():
        subbranch._recover()
def matches(self, branches):
    """Return the names of the branches selected by a 'branches' specification."""
    awkward0 = _normalize_awkwardlib(None)
    selected = self._normalize_branches(branches, awkward0, allownone=False, allowcallable=False, allowdict=False, allowstring=True)
    return [branch.name for branch, interpretation in selected]
_branch_regex = re.compile(b"^/(.*)/([iLmsux]*)$")
@staticmethod
def _branch_flags(flags):
flagsbyte = 0
for flag in flags:
if flag == "i":
flagsbyte += re.I
elif flag == "L":
flagsbyte += re.L
elif flag == "m":
flagsbyte += re.M
elif flag == "s":
flagsbyte += re.S
elif flag == "u":
flagsbyte += re.U
elif flag == "x":
flagsbyte += re.X
return flagsbyte
def _normalize_branches(self, arg, awkward0, allownone=True, allowcallable=True, allowdict=True, allowstring=True, aliases=True):
    """Yield (branch, interpretation) pairs for a user 'branches' specification.

    'arg' may be None (all interpretable branches), a callable (filter or
    interpretation provider), a dict mapping names/globs/"/regex/flags" to
    interpretations, a single string, or an iterable of strings.
    """
    if allownone and arg is None:                   # no specification; read all branches
        for branch in self.allvalues():             # that have interpretations
            interpretation = interpret(branch, awkward0)
            if interpretation is not None:
                yield branch, interpretation
    elif allowcallable and callable(arg):
        for branch in self.allvalues():
            result = arg(branch)
            if result is None or result is False:
                pass
            elif result is True:                    # function is a filter
                interpretation = interpret(branch, awkward0)
                if interpretation is not None:
                    yield branch, interpretation
            else:                                   # function is giving interpretations
                yield branch, branch._normalize_dtype(result, awkward0)
    elif allowdict and isinstance(arg, dict):
        for word, interpretation in arg.items():
            word = _bytesid(word)
            # "/pattern/flags" keys are regular expressions over branch names
            isregex = re.match(self._branch_regex, word)
            if isregex is not None:
                regex, flags = isregex.groups()
                for name, branch in self.iteritems(recursive=True, aliases=aliases):
                    if re.match(regex, name, self._branch_flags(flags)):
                        yield branch, branch._normalize_dtype(interpretation, awkward0)
            elif b"*" in word or b"?" in word or b"[" in word:
                # glob-style wildcard pattern
                for name, branch in self.iteritems(recursive=True, aliases=aliases):
                    if name == word or glob.fnmatch.fnmatchcase(name, word):
                        yield branch, branch._normalize_dtype(interpretation, awkward0)
            else:
                # literal branch name
                branch = self.get(word, aliases=aliases)
                yield branch, branch._normalize_dtype(interpretation, awkward0)
    elif allowstring and isinstance(arg, string_types):
        # a single string is treated as a one-element list
        for x in self._normalize_branches([arg], awkward0):
            yield x
    else:
        try:
            words = iter(arg)                       # only way to check for iterable (in general)
        except Exception:
            raise TypeError("'branches' argument not understood")
        else:
            for word in words:
                word = _bytesid(word)
                isregex = re.match(self._branch_regex, word)
                if isregex is not None:
                    regex, flags = isregex.groups()
                    for name, branch in self.iteritems(recursive=True, aliases=aliases):
                        if re.match(regex, name, self._branch_flags(flags)):
                            interpretation = interpret(branch, awkward0)
                            if interpretation is None:
                                # only an exact-name match makes a missing
                                # interpretation an error
                                if name == word:
                                    raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
                            else:
                                yield branch, interpretation
                elif b"*" in word or b"?" in word or b"[" in word:
                    for name, branch in self.iteritems(recursive=True, aliases=aliases):
                        if name == word or glob.fnmatch.fnmatchcase(name, word):
                            interpretation = interpret(branch, awkward0)
                            if interpretation is None:
                                if name == word:
                                    raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
                            else:
                                yield branch, interpretation
                else:
                    branch = self.get(word, aliases=aliases)
                    interpretation = interpret(branch, awkward0)
                    if interpretation is None:
                        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
                    else:
                        yield branch, interpretation
def __len__(self):
    """len(tree) is the number of entries (tree.numentries)."""
    return self.numentries
def __getitem__(self, name):
    """tree[name] is equivalent to tree.get(name)."""
    return self.get(name)
def __iter__(self):
    """Explicitly not iterable; raises TypeError."""
    # prevent Python's attempt to interpret __len__ and __getitem__ as iteration
    raise TypeError("'TTree' object is not iterable")
@property
def pandas(self):
    """Namespace of pandas-related methods for this tree (lazy import)."""
    import uproot3._connect._pandas
    return uproot3._connect._pandas.TTreeMethods_pandas(self)
################################################################ methods for TBranch
class TBranchMethods(object):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (uproot3.rootio.ROOTObject.__metaclass__,), {})
def _postprocess(self, source, cursor, context, parent):
    """Finish construction after ROOT streaming: attach context, find good baskets."""
    self._source = source
    self._context = context
    self._streamer = None
    self._interpretation = None   # lazily computed by the 'interpretation' property
    self._provenance = []
    # count the leading baskets with valid seek positions, stopping at the
    # write basket (the basket still being filled when the file was written)
    self._numgoodbaskets = 0
    for i, x in enumerate(self._fBasketSeek):
        if x == 0 or i == self._fWriteBasket:
            break
        self._numgoodbaskets += 1
    if self.numentries == self._fBasketEntry[self._numgoodbaskets]:
        # all entries are accounted for by good baskets: no recovery needed
        self._recoveredbaskets = []
        self._entryoffsets = self._fBasketEntry[: self._numgoodbaskets + 1].tolist()
        self._recoverylock = None
    else:
        # some entries live in unlisted baskets; defer recovery until first use
        self._recoveredbaskets = None
        self._entryoffsets = None
        self._recoverylock = threading.Lock()
    self._countbranch = None
    self._tree_iofeatures = 0
    if hasattr(parent, "_fIOFeatures"):
        self._tree_iofeatures = parent._fIOFeatures._fIOBits
def _fill_branchlookup(self, branchlookup):
    """Recursively register this branch's subbranches in a name -> branch dict."""
    for sub in self._fBranches:
        sub._fill_branchlookup(branchlookup)
        branchlookup[sub.name] = sub
@property
def name(self):
    """Branch name, as bytes (straight from the ROOT file)."""
    return self._fName
@property
def title(self):
    """Branch title, as bytes (straight from the ROOT file)."""
    return self._fTitle
@property
def interpretation(self):
    """Default interpretation of this branch, computed once and then cached."""
    awkward0 = _normalize_awkwardlib(None)
    if self._interpretation is None:
        self._interpretation = interpret(self, awkward0)
    return self._interpretation
@property
def countbranch(self):
    """The branch holding this branch's per-entry counts, or None."""
    return self._countbranch
@property
def countleaf(self):
    """The leaf holding this branch's per-entry counts (set elsewhere)."""
    return self._countleaf
@property
def numentries(self):
    """Number of entries in this branch, as an int."""
    return int(self._fEntries)   # or self._fEntryNumber?
@property
def numbranches(self):
    """Total number of subbranches, counted recursively."""
    return sum(1 for _ in self.itervalues(recursive=True))
def iterkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """Yield the names of subbranches passing the filters."""
    for subname, _ in self.iteritems(recursive, filtername, filtertitle):
        yield subname
def itervalues(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """Yield the subbranch objects passing the filters."""
    for _, subbranch in self.iteritems(recursive, filtername, filtertitle):
        yield subbranch
def iteritems(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """Yield (name, branch) for subbranches passing the filters.

    If recursive is the string '/', nested names are joined with b'/' so
    that exact path lookups work (see _get).
    """
    for branch in self._fBranches:
        branch_name = branch.name
        if filtername(branch_name) and filtertitle(branch.title):
            yield branch_name, branch
        if recursive:
            # note: subbranches are visited even when the parent was filtered out
            iterator = branch.iteritems(recursive, filtername, filtertitle)
            for n, b in iterator:
                if recursive == '/':
                    n = branch_name + b'/' + n
                yield n, b
def keys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """List of subbranch names (eager form of iterkeys)."""
    return list(self.iterkeys(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def _ipython_key_completions_(self):
    "Support for completion of keys in an IPython kernel"
    return [key.decode("ascii") for key in self.iterkeys()]
def values(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """List of subbranch objects (eager form of itervalues)."""
    return list(self.itervalues(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def items(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """List of (name, branch) pairs (eager form of iteritems)."""
    return list(self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def allkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
    """All subbranch names, recursively; the 'recursive' parameter is accepted but ignored."""
    return self.keys(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allvalues(self, filtername=nofilter, filtertitle=nofilter):
    """All subbranch objects, recursively."""
    return self.values(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allitems(self, filtername=nofilter, filtertitle=nofilter):
    """All (name, branch) pairs, recursively."""
    return self.items(recursive=True, filtername=filtername, filtertitle=filtertitle)
def _get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter):
    """Find a subbranch by exact (bytes) name; raise _KeyError when absent."""
    if b'/' in name:
        # Look for exact subbranch
        # '/' in the name means a path lookup: switch iteritems into its
        # path-joining mode (see iteritems for the recursive == '/' case)
        recursive = '/'
    for n, b in self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle):
        if n == name:
            return b
    raise uproot3.rootio._KeyError("not found: {0}\n in file: {1}".format(repr(name), self._context.sourcepath))
def get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter):
    """Look up a subbranch by name (str or bytes); raises KeyError if absent."""
    name = _bytesid(name)
    return self._get(name, recursive, filtername, filtertitle)
@property
def numbaskets(self):
    """Number of usable baskets: good baskets plus any recovered ones."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    return len(self._recoveredbaskets) + self._numgoodbaskets
def _cachekey(self, interpretation, entrystart, entrystop):
    """Cache key for a finalized array: file uuid, tree, branch, interpretation, entry range."""
    return "{0};{1};{2};{3};{4}-{5}".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), interpretation.identifier, entrystart, entrystop)
def _basketcachekey(self, i):
    """Cache key for basket i's raw (decompressed) bytes."""
    return "{0};{1};{2};{3};raw".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), i)
def _keycachekey(self, i):
    """Cache key for basket i's TKey object."""
    return "{0};{1};{2};{3};key".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), i)
def _threadsafe_key(self, i, keycache, complete):
    """Get basket i's TKey, using keycache when possible; thread-safe.

    Consistency fix: like _threadsafe_iterate_keys, a cached key is only
    reused when it satisfies the 'complete' requirement (complete keys
    carry a 'border' attribute). Previously a cached incomplete key was
    returned even when complete=True, so callers such as basket_numitems
    could fail on a missing '.border'.
    """
    key = None
    if keycache is not None:
        key = keycache.get(self._keycachekey(i), None)
    if key is None or (complete and not hasattr(key, "border")):
        # cache miss (or cached key is incomplete): read the key through a
        # thread-local copy of the source
        keysource = self._source.threadlocal()
        try:
            key = self._basketkey(keysource, i, complete)
            if keycache is not None:
                keycache[self._keycachekey(i)] = key
        finally:
            keysource.dismiss()
    return key
def _threadsafe_iterate_keys(self, keycache, complete, basketstart=None, basketstop=None):
    """Yield the TKeys for baskets [basketstart, basketstop).

    Serves entirely from keycache when every key is present (and complete,
    if required); otherwise reads missing or incomplete keys through a
    thread-local source. 'complete' keys carry a 'border' attribute.
    """
    if basketstart is None:
        basketstart = 0
    if basketstop is None:
        basketstop = self.numbaskets
    done = False
    if keycache is not None:
        keys = [keycache.get(self._keycachekey(i), None) for i in range(basketstart, basketstop)]
        if all(x is not None for x in keys):
            if not complete or all(hasattr(x, "border") for x in keys):
                # fast path: fully served from the cache
                for key in keys:
                    yield key
                done = True
    if not done:
        keysource = self._source.threadlocal()
        try:
            for i in range(basketstart, basketstop):
                key = None if keycache is None else keycache.get(self._keycachekey(i), None)
                if key is None or (complete and not hasattr(key, "border")):
                    # missing or incomplete: read and (if possible) cache it
                    key = self._basketkey(keysource, i, complete)
                    if keycache is not None:
                        keycache[self._keycachekey(i)] = key
                    yield key
                else:
                    yield key
        finally:
            keysource.dismiss()
def uncompressedbytes(self, keycache=None):
    """Total uncompressed size of this branch's baskets, in bytes."""
    return sum(key._fObjlen for key in self._threadsafe_iterate_keys(keycache, False))
def compressedbytes(self, keycache=None):
    """Total compressed size of this branch's baskets (key headers excluded), in bytes."""
    return sum(key._fNbytes - key._fKeylen for key in self._threadsafe_iterate_keys(keycache, False))
def compressionratio(self, keycache=None):
    """Uncompressed-to-compressed byte ratio over all baskets."""
    uncompressed = 0
    compressed = 0
    for key in self._threadsafe_iterate_keys(keycache, False):
        uncompressed += key._fObjlen
        compressed += key._fNbytes - key._fKeylen
    # raises ZeroDivisionError for a branch with no basket bytes (as before)
    return float(uncompressed) / float(compressed)
def _normalize_dtype(self, interpretation, awkward0):
    """Coerce a user-supplied dtype / scalar type / array / Interpretation into an Interpretation."""
    if inspect.isclass(interpretation) and issubclass(interpretation, awkward0.numpy.generic):
        # a Numpy scalar type (e.g. numpy.float64): treat it as its dtype
        return self._normalize_dtype(awkward0.numpy.dtype(interpretation), awkward0)
    elif isinstance(interpretation, awkward0.numpy.dtype):      # user specified a Numpy dtype
        default = interpret(self, awkward0)
        if isinstance(default, (asdtype, asjagged)):
            return default.to(interpretation)
        else:
            raise ValueError("cannot cast branch {0} (default interpretation {1}) as dtype {2}".format(repr(self.name), default, interpretation))
    elif isinstance(interpretation, awkward0.numpy.ndarray):    # user specified a Numpy array
        default = interpret(self, awkward0)
        if isinstance(default, asdtype):
            return default.toarray(interpretation)
        else:
            raise ValueError("cannot cast branch {0} (default interpretation {1}) as dtype {2}".format(repr(self.name), default, interpretation))
    elif not isinstance(interpretation, uproot3.interp.interp.Interpretation):
        raise TypeError("branch interpretation must be an Interpretation, not {0} (type {1})".format(interpretation, type(interpretation)))
    else:
        # already a proper Interpretation: pass through
        return interpretation
def _normalize_interpretation(self, interpretation, awkward0):
    """Resolve None / dtype / array / Interpretation into this branch's Interpretation.

    Raises ValueError if the branch cannot be interpreted, and rebinds the
    interpretation to 'awkward0' if it was built for a different awkward library.
    """
    if interpretation is None:
        interpretation = interpret(self, awkward0)
    else:
        interpretation = self._normalize_dtype(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if interpretation.awkward0 is not awkward0:
        interpretation = interpretation.awkwardlib(awkward0)
    return interpretation
def numitems(self, interpretation=None, keycache=None):
    """Total number of items (per-entry elements) across all baskets."""
    awkward0 = _normalize_awkwardlib(None)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        # unreachable in practice: _normalize_interpretation raises instead of returning None
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if self._recoveredbaskets is None:
        self._tryrecover()
    return sum(interpretation.numitems(key.border, self.basket_numentries(i)) for i, key in enumerate(self._threadsafe_iterate_keys(keycache, True)))
@property
def compression(self):
    """This branch's compression settings, falling back to the file's when invalid."""
    try:
        return uproot3.source.compressed.Compression(self._fCompress)
    except ValueError:
        # unrecognized compression code: inherit from the enclosing file
        return self._context.compression
def basket_entrystart(self, i):
    """First entry number covered by basket i."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    if not 0 <= i < self.numbaskets:
        raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
    return self._entryoffsets[i]
def basket_entrystop(self, i):
    """One-past-the-last entry number covered by basket i."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    if not 0 <= i < self.numbaskets:
        raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
    return self._entryoffsets[i + 1]
def basket_numentries(self, i):
    """Number of entries stored in basket i."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    if not 0 <= i < self.numbaskets:
        raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
    offsets = self._entryoffsets
    return offsets[i + 1] - offsets[i]
def basket_uncompressedbytes(self, i, keycache=None):
    """Uncompressed size of basket i, in bytes."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    return self._threadsafe_key(i, keycache, False)._fObjlen
def basket_compressedbytes(self, i, keycache=None):
    """Compressed size of basket i (key header excluded), in bytes."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    key = self._threadsafe_key(i, keycache, False)
    return key._fNbytes - key._fKeylen
def basket_numitems(self, i, interpretation=None, keycache=None):
    """Number of items (per-entry elements) in basket i."""
    if self._recoveredbaskets is None:
        self._tryrecover()
    awkward0 = _normalize_awkwardlib(None)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    # a 'complete' key is needed because item counting uses key.border
    key = self._threadsafe_key(i, keycache, True)
    return interpretation.numitems(key.border, self.basket_numentries(i))
def _localentries(self, i, entrystart, entrystop):
    """Clip a global [entrystart, entrystop) range to basket i's local entry coordinates."""
    offset = self.basket_entrystart(i)
    numentries = self.basket_entrystop(i) - offset
    local_entrystart = max(0, entrystart - offset)
    local_entrystop = max(0, min(entrystop - offset, numentries))
    return local_entrystart, local_entrystop
def _basket(self, i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache):
    """Read basket i and return its deserialized content for the local entry range."""
    basketdata = None
    if basketcache is not None:
        basketcachekey = self._basketcachekey(i)
        basketdata = basketcache.get(basketcachekey, None)
    key = self._threadsafe_key(i, keycache, True)
    if basketdata is None:
        basketdata = key.basketdata()
    if basketcache is not None:
        basketcache[basketcachekey] = basketdata
    if key._fObjlen == key.border:
        # no embedded byte-offset table: the basket is flat data only
        data, byteoffsets = basketdata, None
        if self._countbranch is not None and awkward0.numpy.uint8(self._tree_iofeatures) & awkward0.numpy.uint8(uproot3.const.kGenerateOffsetMap) != 0:
            # offsets were omitted from the file (kGenerateOffsetMap);
            # rebuild them from the count branch
            counts = self._countbranch.array(entrystart=(local_entrystart + self.basket_entrystart(i)),
                                             entrystop=(local_entrystop + self.basket_entrystart(i)))
            itemsize = 1
            if isinstance(interpretation, asjagged):
                itemsize = interpretation.content.fromdtype.itemsize
            awkward0.numpy.multiply(counts, itemsize, counts)
            byteoffsets = awkward0.numpy.empty(len(counts) + 1, dtype=awkward0.numpy.int32)
            byteoffsets[0] = 0
            awkward0.numpy.cumsum(counts, out=byteoffsets[1:])
    else:
        # data precede the 'border'; the byte-offset table follows it
        data = basketdata[:key.border]
        byteoffsets = awkward0.numpy.empty((key._fObjlen - key.border - 4) // 4, dtype=awkward0.numpy.int32)   # native endian
        byteoffsets[:-1] = basketdata[key.border + 4 : -4].view(">i4")   # read as big-endian and convert
        byteoffsets[-1] = key._fLast
        # offsets in the file are relative to the key header; make them data-relative
        awkward0.numpy.subtract(byteoffsets, key._fKeylen, byteoffsets)
    return interpretation.fromroot(data, byteoffsets, local_entrystart, local_entrystop, key._fKeylen)
def basket(self, i, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None):
    """Read basket i as a finalized array, restricted to [entrystart, entrystop)."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if self._recoveredbaskets is None:
        self._tryrecover()
    if not 0 <= i < self.numbaskets:
        raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
    # re-express the range in global coordinates clipped to this basket
    entrystart = self.basket_entrystart(i) + local_entrystart
    entrystop = self.basket_entrystart(i) + local_entrystop
    numentries = local_entrystop - local_entrystart
    if cache is not None:
        cachekey = self._cachekey(interpretation, entrystart, entrystop)
        out = cache.get(cachekey, None)
        if out is not None:
            if flatten and isinstance(interpretation, asjagged):
                return out.content
            else:
                return out
    source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
    numitems = interpretation.source_numitems(source)
    destination = interpretation.destination(numitems, numentries)
    interpretation.fill(source, destination, 0, numitems, 0, numentries)
    out = interpretation.finalize(destination, self)
    if cache is not None:
        cache[cachekey] = out
    if flatten and isinstance(interpretation, asjagged):
        return out.content
    else:
        return out
def _basketstartstop(self, entrystart, entrystop):
    """Return the (inclusive, exclusive) basket index range overlapping [entrystart, entrystop).

    Both are None when no basket overlaps.
    """
    basketstart, basketstop = None, None
    for i in range(self.numbaskets):
        if basketstart is None:
            # first basket that overlaps the entry range
            if entrystart < self.basket_entrystop(i) and self.basket_entrystart(i) < entrystop:
                basketstart = i
                basketstop = i
        else:
            # extend through every later basket that still starts before entrystop
            if self.basket_entrystart(i) < entrystop:
                basketstop = i
    if basketstop is not None:
        basketstop += 1   # stop is exclusive
    return basketstart, basketstop
def baskets(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, reportentries=False, executor=None, blocking=True):
    """Read every basket overlapping [entrystart, entrystop) as a list of arrays.

    With reportentries=True, elements are (entrystart, entrystop, array).
    With blocking=False, returns a zero-argument callable producing the list.
    """
    awkward0 = _normalize_awkwardlib(awkwardlib)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if self._recoveredbaskets is None:
        self._tryrecover()
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
    if basketstart is None:
        # no baskets overlap the requested range
        if blocking:
            return []
        else:
            def wait():
                return []
            return wait
    out = [None] * (basketstop - basketstart)
    def fill(j):
        # read basket basketstart+j; returns sys.exc_info() on failure so the
        # exception can be re-raised in the caller's thread
        try:
            basket = self.basket(j + basketstart, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache)
            if reportentries:
                local_entrystart, local_entrystop = self._localentries(j + basketstart, entrystart, entrystop)
                basket = (local_entrystart + self.basket_entrystart(j + basketstart),
                          local_entrystop + self.basket_entrystart(j + basketstart),
                          basket)
        except Exception:
            return sys.exc_info()
        else:
            out[j] = basket
            return None
    if executor is None:
        for j in range(basketstop - basketstart):
            _delayedraise(fill(j))
        excinfos = ()
    else:
        excinfos = executor.map(fill, range(basketstop - basketstart))
    if blocking:
        for excinfo in excinfos:
            _delayedraise(excinfo)
        return out
    else:
        def wait():
            for excinfo in excinfos:
                _delayedraise(excinfo)
            return out
        return wait
def iterate_baskets(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, reportentries=False):
    """Generator over baskets overlapping [entrystart, entrystop), one array per basket."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if self._recoveredbaskets is None:
        self._tryrecover()
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    for i in range(self.numbaskets):
        # skip baskets that do not overlap the requested entry range
        if entrystart < self.basket_entrystop(i) and self.basket_entrystart(i) < entrystop:
            local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
            if local_entrystop > local_entrystart:
                if reportentries:
                    yield (local_entrystart + self.basket_entrystart(i),
                           local_entrystop + self.basket_entrystart(i),
                           self.basket(i, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache))
                else:
                    yield self.basket(i, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache)
def _basket_itemoffset(self, interpretation, basketstart, basketstop, keycache):
    """Cumulative item counts for baskets [basketstart, basketstop), starting at 0."""
    offsets = [0]
    for j, key in enumerate(self._threadsafe_iterate_keys(keycache, True, basketstart, basketstop)):
        numitems = interpretation.numitems(key.border, self.basket_numentries(basketstart + j))
        offsets.append(offsets[-1] + numitems)
    return offsets
def _basket_entryoffset(self, basketstart, basketstop):
    """Cumulative entry counts for baskets [basketstart, basketstop), starting at 0."""
    offsets = [0]
    for i in range(basketstart, basketstop):
        offsets.append(offsets[-1] + self.basket_numentries(i))
    return offsets
def array(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
    """Read [entrystart, entrystop) of this branch as one finalized array.

    With blocking=False, returns a zero-argument callable producing the array.
    """
    if self._recoveredbaskets is None:
        self._tryrecover()
    awkward0 = _normalize_awkwardlib(awkwardlib)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
    if basketstart is not None and basketstop is not None and self._source.parent() is not None:
        # hint the underlying source to prefetch all needed baskets
        self._source.parent().preload([self._fBasketSeek[i] for i in range(basketstart, basketstop)])
    if cache is not None:
        cachekey = self._cachekey(interpretation, entrystart, entrystop)
        out = cache.get(cachekey, None)
        if out is not None:
            if flatten and isinstance(interpretation, asjagged):
                out = out.content
            if blocking:
                return out
            else:
                return lambda: out
    if basketstart is None:
        # no baskets overlap: empty result
        if blocking:
            return interpretation.empty()
        else:
            def wait():
                return interpretation.empty()
            return wait
    if keycache is None:
        keycache = {}
    basket_itemoffset = self._basket_itemoffset(interpretation, basketstart, basketstop, keycache)
    basket_entryoffset = self._basket_entryoffset(basketstart, basketstop)
    destination = interpretation.destination(basket_itemoffset[-1], basket_entryoffset[-1])
    def fill(j):
        # deserialize basket basketstart+j into its slice of 'destination';
        # returns sys.exc_info() on failure so errors can be re-raised later
        try:
            i = j + basketstart
            local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
            source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
            expecteditems = basket_itemoffset[j + 1] - basket_itemoffset[j]
            source_numitems = interpretation.source_numitems(source)
            expectedentries = basket_entryoffset[j + 1] - basket_entryoffset[j]
            source_numentries = local_entrystop - local_entrystart
            # the first and last baskets may be partially covered by the entry
            # range; shrink their offsets so clip() removes the unused margin
            if j + 1 == basketstop - basketstart:
                if expecteditems > source_numitems:
                    basket_itemoffset[j + 1] -= expecteditems - source_numitems
                if expectedentries > source_numentries:
                    basket_entryoffset[j + 1] -= expectedentries - source_numentries
            elif j == 0:
                if expecteditems > source_numitems:
                    basket_itemoffset[j] += expecteditems - source_numitems
                if expectedentries > source_numentries:
                    basket_entryoffset[j] += expectedentries - source_numentries
            interpretation.fill(source,
                                destination,
                                basket_itemoffset[j],
                                basket_itemoffset[j + 1],
                                basket_entryoffset[j],
                                basket_entryoffset[j + 1])
        except Exception:
            return sys.exc_info()
    if executor is None:
        for j in range(basketstop - basketstart):
            _delayedraise(fill(j))
        excinfos = ()
    else:
        excinfos = executor.map(fill, range(basketstop - basketstart))
    def wait():
        # re-raise any worker exception, then clip and finalize the result
        for excinfo in excinfos:
            _delayedraise(excinfo)
        clipped = interpretation.clip(destination,
                                      basket_itemoffset[0],
                                      basket_itemoffset[-1],
                                      basket_entryoffset[0],
                                      basket_entryoffset[-1])
        out = interpretation.finalize(clipped, self)
        if cache is not None:
            cache[cachekey] = out
        if flatten and isinstance(interpretation, asjagged):
            return out.content
        else:
            return out
    if blocking:
        return wait()
    else:
        return wait
def _step_array(self, interpretation, basket_itemoffset, basket_entryoffset, entrystart, entrystop, awkward0, basketcache, keycache, executor, explicit_basketcache):
    """One step of iterate(): read [entrystart, entrystop) and return a 'wait' callable.

    The callable re-raises worker exceptions and returns the clipped (but not
    finalized) destination array. basket_itemoffset/basket_entryoffset are
    mutated in place to trim partially covered edge baskets.
    """
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    if self._recoveredbaskets is None:
        self._tryrecover()
    basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
    if basketstart is None:
        return lambda: interpretation.empty()
    destination = interpretation.destination(basket_itemoffset[-1], basket_entryoffset[-1])
    def fill(j):
        # same per-basket fill protocol as in array(); returns exc_info on failure
        try:
            i = j + basketstart
            local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
            source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
            expecteditems = basket_itemoffset[j + 1] - basket_itemoffset[j]
            source_numitems = interpretation.source_numitems(source)
            expectedentries = basket_entryoffset[j + 1] - basket_entryoffset[j]
            source_numentries = local_entrystop - local_entrystart
            if j + 1 == basketstop - basketstart:
                if expecteditems > source_numitems:
                    basket_itemoffset[j + 1] -= expecteditems - source_numitems
                if expectedentries > source_numentries:
                    basket_entryoffset[j + 1] -= expectedentries - source_numentries
            elif j == 0:
                if expecteditems > source_numitems:
                    basket_itemoffset[j] += expecteditems - source_numitems
                if expectedentries > source_numentries:
                    basket_entryoffset[j] += expectedentries - source_numentries
            interpretation.fill(source,
                                destination,
                                basket_itemoffset[j],
                                basket_itemoffset[j + 1],
                                basket_entryoffset[j],
                                basket_entryoffset[j + 1])
        except Exception:
            return sys.exc_info()
    if executor is None:
        for j in range(basketstop - basketstart):
            _delayedraise(fill(j))
        excinfos = ()
    else:
        excinfos = executor.map(fill, range(basketstop - basketstart))
    def wait():
        for excinfo in excinfos:
            _delayedraise(excinfo)
        if not explicit_basketcache:
            # evict the temporary baskets we cached ourselves
            for i in range(basketstop - 1):   # not including the last real basket
                try:
                    del basketcache[self._basketcachekey(i)]
                except KeyError:
                    pass
        return interpretation.clip(destination,
                                   basket_itemoffset[0],
                                   basket_itemoffset[-1],
                                   basket_entryoffset[0],
                                   basket_entryoffset[-1])
    return wait
def mempartitions(self, numbytes, entrystart=None, entrystop=None, keycache=None, linear=True):
    """Yield (start, stop) entry ranges sized so that each range reads
    roughly ``numbytes`` of on-disk basket data.

    ``numbytes`` may be a number of bytes or a memory-size string such as
    "100 MB" (parsed by ``_memsize``).  Only ``linear=True`` (equal-sized
    steps) is implemented.
    """
    m = _memsize(numbytes)
    if m is not None:
        numbytes = m
    if numbytes <= 0:
        raise ValueError("target numbytes must be positive")
    # NOTE(review): awkward0 appears unused below — possibly vestigial
    awkward0 = _normalize_awkwardlib(None)
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    if not linear:
        raise NotImplementedError("non-linear mempartition has not been implemented")
    relevant_numbytes = 0.0
    if self._recoveredbaskets is None:
        self._tryrecover()
    # Sum the on-disk bytes of every basket overlapping [entrystart, entrystop),
    # pro-rated by the fraction of the basket's entries inside the range.
    for i, key in enumerate(self._threadsafe_iterate_keys(keycache, False)):
        start, stop = self._entryoffsets[i], self._entryoffsets[i + 1]
        if entrystart < stop and start < entrystop:
            this_numbytes = key._fObjlen * (min(stop, entrystop) - max(start, entrystart)) / float(stop - start)
            assert this_numbytes >= 0.0
            relevant_numbytes += this_numbytes
    # entries per step so that each step covers ~numbytes, assuming entries
    # are uniformly sized; at least one entry per step
    entrysteps = max(1, round(math.ceil((entrystop - entrystart) * numbytes / relevant_numbytes)))
    start, stop = entrystart, entrystart
    while stop < entrystop:
        stop = min(stop + entrysteps, entrystop)
        if stop > start:
            yield start, stop
        start = stop
def _normalize_entrysteps(self, entrysteps, entrystart, entrystop, keycache):
    """Translate the user-facing ``entrysteps`` argument into an iterable of
    (start, stop) entry ranges.

    Accepted forms:
      * memory-size string ("100 MB", ...) -> delegated to mempartitions
      * None                               -> basket boundaries
      * float("inf")                       -> one step covering everything
      * positive integer                   -> equal steps of that many entries
      * iterable of (start, stop) pairs    -> passed through unchanged
    """
    numbytes = _memsize(entrysteps)
    if numbytes is not None:
        return self.mempartitions(numbytes, entrystart=entrystart, entrystop=entrystop, keycache=keycache, linear=True)
    if isinstance(entrysteps, string_types):
        # a string that _memsize could not parse is an error, not a step count
        raise ValueError("string {0} does not match the memory size pattern (number followed by B/kB/MB/GB/etc.)".format(repr(entrysteps)))
    if entrysteps is None:
        # step at natural basket boundaries that overlap the requested range
        if self._recoveredbaskets is None:
            self._tryrecover()
        return [(self._entryoffsets[i], self._entryoffsets[i + 1]) for i in range(self.numbaskets) if entrystart < self._entryoffsets[i + 1] and entrystop >= self._entryoffsets[i]]
    elif entrysteps == float("inf"):
        return [(entrystart, min(entrystop, self.numentries))]
    elif isinstance(entrysteps, (numbers.Integral, numpy.integer)):
        entrystepsize = entrysteps
        if entrystepsize <= 0:
            raise ValueError("if an integer, entrysteps must be positive")
        effectivestop = min(entrystop, self.numentries)
        starts = numpy.arange(entrystart, effectivestop, entrystepsize)
        stops = numpy.append(starts[1:], effectivestop)
        return zip(starts, stops)
    else:
        # last resort: accept any iterable of explicit (start, stop) pairs
        try:
            iter(entrysteps)
        except TypeError:
            raise TypeError("entrysteps must be None for cluster iteration, a positive integer for equal steps in number of entries (inf for maximal), a memory size string (number followed by B/kB/MB/GB/etc.), or an iterable of 2-tuples for explicit entry starts (inclusive) and stops (exclusive)")
        return entrysteps
def lazyarray(self, interpretation=None, entrysteps=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
    """Return this branch as a lazy (virtual) array.

    With ``chunked=True`` the result is a ChunkedArray of VirtualArrays,
    one chunk per entry step; with ``chunked=False`` a single VirtualArray
    covering the first (and only) step is returned.  Data are only read
    when a chunk is materialized.
    """
    if self._recoveredbaskets is None:
        self._tryrecover()
    awkward0 = _normalize_awkwardlib(awkwardlib)
    interpretation = self._normalize_interpretation(interpretation, awkward0)
    if interpretation is None:
        raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
    entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
    if not chunked and entrysteps is None:
        # unchunked output needs exactly one step spanning everything
        entrysteps = float('inf')
    entrysteps = self._normalize_entrysteps(entrysteps, entrystart, entrystop, keycache)
    # Unwrap jagged layers to find the innermost interpretation; if it carries
    # user-defined array methods, mix them into the output array classes.
    inner = interpretation
    while isinstance(inner, asjagged):
        inner = inner.content
    if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
        VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
        chunkedarray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.ChunkedArray)
    elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
        VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
        chunkedarray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.ChunkedArray)
    else:
        VirtualArray = awkward0.VirtualArray
        chunkedarray = awkward0.ChunkedArray
    # picklable generator that re-reads this branch when a chunk materializes
    lazybranch = _LazyBranch(self._context.sourcepath, self._context.treename, self.name, self, interpretation, flatten, awkward0.__name__, basketcache, keycache, executor)
    if chunked:
        chunks = []
        counts = []
        for start, stop in entrysteps:
            numentries = stop - start
            chunks.append(VirtualArray(lazybranch, (start, stop), cache=cache, type=awkward0.type.ArrayType(numentries, interpretation.type), persistvirtual=persistvirtual))
            counts.append(numentries)
        out = chunkedarray(chunks, counts)
        out.__doc__ = self.title.decode('ascii')
        return out
    else:
        start, stop = entrysteps[0]
        out = VirtualArray(lazybranch, (start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual)
        out.__doc__ = self.title.decode('ascii')
        return out
class _BasketKey(object):
    """TKey header of a single TBasket.

    Parses the key's small (32-bit seek) form and, if fVersion indicates a
    large file, re-parses the big (64-bit seek) form.  With ``complete=True``
    it also reads the basket-specific trailer fields (fBufferSize, fNevBuf,
    fLast, ...) and computes ``border``, the byte offset where the entry
    offsets begin.  Sets up ``self.source``/``self.cursor`` for the payload,
    wrapping in a CompressedSource when the object length differs from the
    stored length (i.e. the basket is compressed).
    """
    def __init__(self, source, cursor, compression, complete):
        start = cursor.index
        self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, TBranchMethods._BasketKey._format_small)
        if self._fVersion > 1000:
            # big-file form: re-read with 64-bit fSeekKey/fSeekPdir
            cursor.index = start
            self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, TBranchMethods._BasketKey._format_big)
        if complete:
            # trailer fields sit at the end of the key, before a 1-byte terminator
            cursor.index = start + self._fKeylen - TBranchMethods._BasketKey._format_complete.size - 1
            self._fVersion, self._fBufferSize, self._fNevBufSize, self._fNevBuf, self._fLast = cursor.fields(source, TBranchMethods._BasketKey._format_complete)
            self.border = self._fLast - self._fKeylen
            if source.size() is not None:
                if source.size() - self._fSeekKey < self._fNbytes:
                    # walk up to the root source to report the file path
                    s = source
                    while s.parent() is not None and s.parent() is not s:
                        s = s.parent()
                    raise ValueError("TKey declares that object has {0} bytes but only {1} remain in the file\n in file: {2}".format(self._fNbytes, source.size() - self._fSeekKey, s.path))
            if self._fObjlen != self._fNbytes - self._fKeylen:
                # stored size != object size -> compressed basket
                self.source = uproot3.source.compressed.CompressedSource(compression, source, Cursor(self._fSeekKey + self._fKeylen), self._fNbytes - self._fKeylen, self._fObjlen)
                self.cursor = Cursor(0)
            else:
                self.source = source
                self.cursor = Cursor(self._fSeekKey + self._fKeylen)

    # struct layouts: small/big key headers and the basket trailer fields
    _format_small = struct.Struct(">ihiIhhii")
    _format_big = struct.Struct(">ihiIhhqq")
    _format_complete = struct.Struct(">Hiiii")

    @property
    def fName(self):
        return "TBranchMethods._BasketKey"

    @property
    def fTitle(self):
        return "TBranchMethods._BasketKey"

    @property
    def fClassName(self):
        return "TBasket"

    def basketdata(self):
        """Return the basket payload bytes via a thread-local copy of the source."""
        datasource = self.source.threadlocal()
        try:
            return self.cursor.copied().bytes(datasource, self._fObjlen)
        finally:
            datasource.dismiss()
class _RecoveredTBasket(uproot3.rootio.ROOTObject):
    """A TBasket reconstructed from the branch's fBaskets TObjArray, used to
    recover data from files that were not cleanly closed.  Mimics enough of
    the _BasketKey interface (basketdata, numentries, _fObjlen, ...) to be
    used in its place."""
    @classmethod
    def _readinto(cls, self, source, cursor, context, parent):
        start = cursor.index
        self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle = cursor.fields(source, cls._format1)
        # skip the class name, name, and title
        cursor.index = start + self._fKeylen - cls._format2.size - 1
        self._fVersion, self._fBufferSize, self._fNevBufSize, self._fNevBuf, self._fLast = cursor.fields(source, cls._format2)
        # one-byte terminator
        cursor.skip(1)
        # then if you have offsets data, read them in
        if self._fNevBufSize > 8:
            byteoffsets = cursor.bytes(source, self._fNevBuf * 4 + 8)
            cursor.skip(-4)
        # there's a second TKey here, but it doesn't contain any new information (in fact, less)
        cursor.skip(self._fKeylen)
        size = self.border = self._fLast - self._fKeylen
        # the data (not including offsets)
        self.contents = cursor.bytes(source, size)
        # put the offsets back in, in the way that we expect it
        if self._fNevBufSize > 8:
            self.contents = numpy.concatenate((self.contents, byteoffsets))
            size += byteoffsets.nbytes
        self._fObjlen = size
        self._fNbytes = self._fObjlen + self._fKeylen
        return self

    # key header without seeks, and the basket trailer fields
    _format1 = struct.Struct(">ihiIhh")
    _format2 = struct.Struct(">Hiiii")

    def basketdata(self):
        # recovered baskets hold their payload in memory, already decompressed
        return self.contents

    @property
    def numentries(self):
        return self._fNevBuf
def _recover(self):
    """Rebuild this branch's basket list by reading recovered TBaskets out of
    its fBaskets TObjArray (for files that were not cleanly closed).

    On success, sets ``self._recoveredbaskets`` and extends
    ``self._entryoffsets`` under ``self._recoverylock``.  If the recovered
    entry counts do not add up and the branch is uninterpretable anyway,
    recovery is silently abandoned; otherwise a ValueError is raised.
    """
    recoveredbaskets = [x for x in uproot3.rootio.TObjArray.read(self._source, self._fBaskets._cursor, self._context, self, asclass=TBranchMethods._RecoveredTBasket) if x is not None]
    if self._numgoodbaskets == 0:
        entryoffsets = [0]
    else:
        entryoffsets = self._fBasketEntry[:self._numgoodbaskets + 1].tolist()
    # append cumulative entry counts for each recovered basket
    for basket in recoveredbaskets:
        entryoffsets.append(entryoffsets[-1] + basket.numentries)
    if entryoffsets[-1] == self.numentries:
        with self._recoverylock:
            self._recoveredbaskets = recoveredbaskets
            self._entryoffsets = entryoffsets
    else:
        if self.interpretation is None:
            # branch cannot be read anyway; accept the mismatch quietly
            self._recoveredbaskets = []
        else:
            raise ValueError("entries in recovered baskets (offsets {0}) don't add up to total number of entries ({1})\n in file: {2}".format(entryoffsets, self.numentries, self._context.sourcepath))
def _tryrecover(self):
if self._recoveredbaskets is None:
self._recover()
def _basketkey(self, source, i, complete):
if 0 <= i < self._numgoodbaskets:
return self._BasketKey(source.parent(), Cursor(self._fBasketSeek[i]), self.compression, complete)
elif self._numgoodbaskets <= i < self.numbaskets:
return self._recoveredbaskets[i - self._numgoodbaskets]
else:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
def _format(self, foldnames, indent="", strip=""):
name = self._fName.decode("ascii")
if foldnames and name.startswith(strip + "."):
name = name[len(strip) + 1:]
if len(name) > 26:
out = [indent + name, indent + "{0:26s} {1:26s} {2}".format("", "(no streamer)" if self._streamer is None else self._streamer.__class__.__name__, self.interpretation)]
else:
out = [indent + "{0:26s} {1:26s} {2}".format(name, "(no streamer)" if self._streamer is None else self._streamer.__class__.__name__, self.interpretation)]
for branch in self._fBranches:
out.extend(branch._format(foldnames, indent + " " if foldnames else indent, self._fName))
if len(self._fBranches) > 0 and out[-1] != "":
out.append("")
return out
def show(self, foldnames=False, stream=sys.stdout):
    """Print the branch listing to *stream*; if *stream* is None, return it
    as a single string instead."""
    lines = self._format(foldnames)
    if stream is None:
        return "\n".join(lines)
    for line in lines:
        stream.write(line)
        stream.write("\n")
def __len__(self):
    """Number of entries in the branch (not the number of baskets)."""
    return self.numentries
def __getitem__(self, name):
    """Look up a subbranch by name (delegates to ``get``)."""
    return self.get(name)
def __iter__(self):
    # prevent Python's attempt to interpret __len__ and __getitem__ as iteration
    raise TypeError("'TBranch' object is not iterable")
################################################################ for lazy arrays
class _LazyFiles(object):
    """Picklable callable used as the generator behind multi-file lazy arrays.

    Given ``(pathi, branchname)``, it opens (and LRU-caches) the TTree from
    ``paths[pathi]`` and returns a lazyarray for that branch.  Caches and the
    executor are process-local: they are dropped on pickling and rebuilt by
    ``_init`` after unpickling.
    """
    def __init__(self, paths, treepath, branches, entrysteps, flatten, awkwardlib, basketcache, keycache, executor, persistvirtual, localsource, xrootdsource, httpsource, options):
        self.paths = paths
        self.treepath = treepath
        self.branches = branches
        self.entrysteps = entrysteps
        self.flatten = flatten
        self.awkwardlib = awkwardlib
        self.basketcache = basketcache
        self.keycache = keycache
        self.executor = executor
        self.persistvirtual = persistvirtual
        self.localsource = localsource
        self.xrootdsource = xrootdsource
        self.httpsource = httpsource
        self.options = options
        self._init()

    def _init(self):
        # default caches, shared by all trees opened through this object
        self.trees = cachetools.LRUCache(5)  # last 5 TTrees
        if self.basketcache is None:
            self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2)  # 1 MB
        if self.keycache is None:
            self.keycache = cachetools.LRUCache(10000)  # last 10000 TKeys

    def __getstate__(self):
        # caches and executor are deliberately excluded: not picklable and
        # cheap to rebuild in the receiving process
        return {"paths": self.paths,
                "treepath": self.treepath,
                "branches": self.branches,
                "entrysteps": self.entrysteps,
                "flatten": self.flatten,
                "awkwardlib": self.awkwardlib,
                "persistvirtual": self.persistvirtual,
                "localsource": self.localsource,
                "xrootdsource": self.xrootdsource,
                "httpsource": self.httpsource,
                "options": self.options}

    def __setstate__(self, state):
        self.paths = state["paths"]
        self.treepath = state["treepath"]
        self.branches = state["branches"]
        self.entrysteps = state["entrysteps"]
        self.flatten = state["flatten"]
        self.awkwardlib = state["awkwardlib"]
        self.basketcache = None
        self.keycache = None
        self.executor = None
        self.persistvirtual = state["persistvirtual"]
        self.localsource = state["localsource"]
        self.xrootdsource = state["xrootdsource"]
        self.httpsource = state["httpsource"]
        self.options = state["options"]
        self._init()

    def __call__(self, pathi, branchname):
        """Materialize the lazyarray for one (file index, branch name) pair."""
        awkward0 = _normalize_awkwardlib(self.awkwardlib)
        tree = self.trees.get(self.paths[pathi], None)
        if tree is None:
            tree = self.trees[self.paths[pathi]] = uproot3.rootio.open(self.paths[pathi])[self.treepath]
            # cache the normalized interpretations on the tree itself
            tree.interpretations = dict((b.name, x) for b, x in tree._normalize_branches(self.branches, awkward0))
        return tree[branchname].lazyarray(interpretation=tree.interpretations[branchname], entrysteps=self.entrysteps, entrystart=None, entrystop=None, flatten=self.flatten, awkwardlib=awkward0, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor, persistvirtual=self.persistvirtual)
class _LazyTree(object):
    """Picklable callable that reads one branch range from a single TTree.

    ``(branch, entrystart, entrystop)`` -> array.  The open tree, caches,
    and executor are process-local: dropped on pickling, rebuilt by ``_init``.
    """
    def __init__(self, path, treepath, tree, interpretation, flatten, awkwardlib, basketcache, keycache, executor):
        self.path = path
        self.treepath = treepath
        self.tree = tree
        self.interpretation = interpretation
        self.flatten = flatten
        self.awkwardlib = awkwardlib
        self.basketcache = basketcache
        self.keycache = keycache
        self.executor = executor
        self._init()

    def _init(self):
        # reopen the tree and rebuild default caches if they were not supplied
        # (or were dropped by pickling)
        if self.tree is None:
            self.tree = uproot3.rootio.open(self.path)[self.treepath]
        if self.basketcache is None:
            self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2)  # 1 MB
        if self.keycache is None:
            self.keycache = {}  # unlimited

    def __getstate__(self):
        # the open tree, caches, and executor are deliberately excluded
        return {"path": self.path,
                "treepath": self.treepath,
                "interpretation": self.interpretation,
                "flatten": self.flatten,
                "awkwardlib": self.awkwardlib}

    def __setstate__(self, state):
        self.path = state["path"]
        self.treepath = state["treepath"]
        self.tree = None
        self.interpretation = state["interpretation"]
        self.flatten = state["flatten"]
        self.awkwardlib = state["awkwardlib"]
        self.basketcache = None
        self.keycache = None
        self.executor = None
        self._init()

    def __call__(self, branch, entrystart, entrystop):
        """Read entries [entrystart, entrystop) of *branch* eagerly."""
        return self.tree[branch].array(interpretation=self.interpretation[branch], entrystart=entrystart, entrystop=entrystop, flatten=self.flatten, awkwardlib=self.awkwardlib, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor)
class _LazyBranch(object):
    """Picklable callable that reads one entry range of a single branch.

    ``(entrystart, entrystop)`` -> array.  Used as the generator inside
    VirtualArray chunks produced by ``TBranchMethods.lazyarray``.  The open
    branch, caches, and executor are process-local: dropped on pickling,
    rebuilt by ``_init``.
    """
    def __init__(self, path, treepath, branchname, branch, interpretation, flatten, awkwardlib, basketcache, keycache, executor):
        self.path = path
        self.treepath = treepath
        self.branchname = branchname
        self.branch = branch
        self.interpretation = interpretation
        self.flatten = flatten
        self.awkwardlib = awkwardlib
        self.basketcache = basketcache
        self.keycache = keycache
        self.executor = executor
        self._init()

    def _init(self):
        # reopen the branch and rebuild default caches if missing
        if self.branch is None:
            self.branch = uproot3.rootio.open(self.path)[self.treepath][self.branchname]
        if self.basketcache is None:
            self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2)  # 1 MB
        if self.keycache is None:
            self.keycache = {}  # unlimited

    def __getstate__(self):
        # the open branch, caches, and executor are deliberately excluded
        return {"path": self.path,
                "treepath": self.treepath,
                "branchname": self.branchname,
                "interpretation": self.interpretation,
                "flatten": self.flatten,
                "awkwardlib": self.awkwardlib}

    def __setstate__(self, state):
        self.path = state["path"]
        self.treepath = state["treepath"]
        self.branchname = state["branchname"]
        self.branch = None
        self.interpretation = state["interpretation"]
        self.flatten = state["flatten"]
        self.awkwardlib = state["awkwardlib"]
        self.basketcache = None
        self.keycache = None
        self.executor = None
        self._init()

    def __call__(self, entrystart, entrystop):
        """Read entries [entrystart, entrystop) of the branch eagerly."""
        return self.branch.array(interpretation=self.interpretation, entrystart=entrystart, entrystop=entrystop, flatten=self.flatten, awkwardlib=self.awkwardlib, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor, blocking=True)
def lazyarray(path, treepath, branchname, interpretation=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
    """Lazy-load a single branch from one or more files.

    A dict ``{branchname: interpretation}`` pins the interpretation; a bare
    branch name lets uproot pick one.  Raises if the name matches more than
    one branch.
    """
    branches = branchname if interpretation is None else {branchname: interpretation}
    table = lazyarrays(path, treepath, branches=branches, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, profile=None, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=persistvirtual, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
    if len(table.columns) != 1:
        raise ValueError("list of branch names or glob/regex matches more than one branch; use uproot3.lazyarrays (plural)")
    return table[table.columns[0]]
def lazyarrays(path, treepath, branches=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, profile=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
    """Lazy-load selected branches from one or more files into a Table of
    ChunkedArrays, one chunk per file.  Data are read only when a chunk is
    materialized."""
    awkward0 = _normalize_awkwardlib(awkwardlib)
    # expand globs / lists of paths into a flat list of filenames
    if isinstance(path, string_types):
        paths = _filename_explode(path)
    else:
        paths = [y for x in path for y in _filename_explode(x)]
    # entry count per file, needed to type each chunk without reading it
    path2count = numentries(path, treepath, total=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, executor=executor, blocking=True)
    lazyfiles = _LazyFiles(paths, treepath, branches, entrysteps, flatten, awkward0.__name__, basketcache, keycache, executor, persistvirtual, localsource, xrootdsource, httpsource, options)
    # normalize the branch selection against the first file that has the tree
    brancheslist = None
    for path in paths:
        file = uproot3.rootio.open(path, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
        try:
            tree = file[treepath]
        except KeyError:
            continue
        brancheslist = list(tree._normalize_branches(branches, awkward0))
        break
    if brancheslist is None:
        raise ValueError("no matching paths contained a tree named {0}".format(repr(treepath)))
    out = awkward0.Table()
    for branch, interpretation in brancheslist:
        # if the innermost interpretation carries user-defined array methods,
        # mix them into the VirtualArray class for this column
        inner = interpretation
        while isinstance(inner, asjagged):
            inner = inner.content
        if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
            VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
        elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
            VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
        else:
            VirtualArray = awkward0.VirtualArray
        # one virtual chunk per file
        chunks = []
        counts = []
        for pathi, path in enumerate(paths):
            chunks.append(VirtualArray(lazyfiles, (pathi, branch.name), cache=cache, type=awkward0.type.ArrayType(path2count[path], interpretation.type), persistvirtual=persistvirtual))
            counts.append(path2count[path])
        name = branch.name.decode("ascii") if namedecode is None else branch.name.decode(namedecode)
        out[name] = awkward0.ChunkedArray(chunks, counts)
    if profile is not None:
        # apply a named physics profile (e.g. Lorentz vectors) to the table
        out = uproot_methods.profiles.transformer(profile)(out)
    return out
def daskarray(path, treepath, branchname, interpretation=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
    """Lazy-load a single (flat, one-dimensional) branch and wrap it as a
    dask.array."""
    lazy = lazyarray(path, treepath, branchname, interpretation=interpretation, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
    import dask.array
    if len(lazy.shape) != 1:
        raise NotImplementedError("TODO: len(shape) > 1")
    return dask.array.from_array(lazy, lazy.shape, fancy=True)
def daskframe(path, treepath, branches=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
    """Lazy-load selected (flat, one-dimensional) branches and combine them
    into a dask.dataframe, one column per branch."""
    import dask.array
    import dask.dataframe
    table = lazyarrays(path, treepath, branches=branches, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, profile=None, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
    series = []
    for colname in table.columns:
        column = table[colname]
        if len(column.shape) != 1:
            raise NotImplementedError("TODO: len(shape) > 1")
        darr = dask.array.from_array(column, column.shape, fancy=True)
        series.append(dask.dataframe.from_dask_array(darr, columns=colname))
    return dask.dataframe.concat(series, axis=1)
################################################################ for quickly getting numentries
def numentries(path, treepath, total=True, localsource=MemmapSource.defaults, xrootdsource=None, httpsource=None, executor=None, blocking=True, **options):
    """Quickly count the entries of a TTree across one or more files.

    Returns the total (``total=True``) or an OrderedDict of per-path counts.
    ``path`` may be a single filename/glob or an iterable of them.

    Fix: the xrootdsource/httpsource defaults were mutable dict literals
    shared across all calls (a classic Python pitfall); use None sentinels
    and build a fresh dict per call instead.  Behavior for callers that
    passed explicit sources is unchanged.
    """
    if xrootdsource is None:
        xrootdsource = {"timeout": None, "chunkbytes": 32*1024, "limitbytes": 1024**2, "parallel": False}
    if httpsource is None:
        httpsource = {"chunkbytes": 32*1024, "limitbytes": 1024**2, "parallel": False}
    # expand globs / lists of paths into a flat list of filenames
    if isinstance(path, string_types):
        paths = _filename_explode(path)
    else:
        paths = [y for x in path for y in _filename_explode(x)]
    return _numentries(paths, treepath, total, localsource, xrootdsource, httpsource, executor, blocking, [None] * len(paths), options)
def _numentries(paths, treepath, total, localsource, xrootdsource, httpsource, executor, blocking, uuids, options):
    """Implementation behind ``numentries``: read only the TTree header fields
    needed to get fEntries from each file, optionally in parallel.

    Fills ``uuids[i]`` with each file's uuid as a side effect.  Returns the
    sum or a per-path OrderedDict; if ``blocking`` is False, returns a
    ``wait()`` callable that produces the same result.
    """
    class _TTreeForNumEntries(uproot3.rootio.ROOTStreamedObject):
        # Minimal stand-in for TTree: skips the leading base objects and reads
        # just fEntries, avoiding a full tree deserialization.
        @classmethod
        def _readinto(cls, self, source, cursor, context, parent):
            start, cnt, classversion = uproot3.rootio._startcheck(source, cursor)
            tnamed = uproot3.rootio.Undefined.read(source, cursor, context, parent)
            tattline = uproot3.rootio.Undefined.read(source, cursor, context, parent)
            tattfill = uproot3.rootio.Undefined.read(source, cursor, context, parent)
            tattmarker = uproot3.rootio.Undefined.read(source, cursor, context, parent)
            self._fEntries, = cursor.fields(source, _TTreeForNumEntries._format1)
            return self
        _format1 = struct.Struct('>q')

    out = [None] * len(paths)

    def fill(i):
        # Count entries for paths[i]; returns sys.exc_info() on failure so
        # errors can be re-raised later (required under an executor).
        try:
            file = uproot3.rootio.open(paths[i], localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, read_streamers=False, **options)
        except Exception:
            return sys.exc_info()
        else:
            try:
                source = file._context.source
                # substitute the lightweight TTree reader for this file only
                file._context.classes["TTree"] = _TTreeForNumEntries
                try:
                    out[i] = file[treepath]._fEntries
                except KeyError:
                    # missing tree counts as zero entries
                    out[i] = 0
                uuids[i] = file._context.uuid
            except Exception:
                return sys.exc_info()
            else:
                return None
            finally:
                source.close()

    if executor is None:
        # serial path: raise immediately on the first failing file
        for i in range(len(paths)):
            _delayedraise(fill(i))
        excinfos = ()
    else:
        excinfos = executor.map(fill, range(len(paths)))

    def wait():
        # re-raise any deferred exceptions, then aggregate
        for excinfo in excinfos:
            _delayedraise(excinfo)
        if total:
            return sum(out)
        else:
            return OrderedDict(zip(paths, out))

    if blocking:
        return wait()
    else:
        return wait
| bsd-3-clause |
AmurG/tardis | tardis/io/config_reader.py | 3 | 39949 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
    """Raised for invalid or inconsistent values in a TARDIS configuration."""
    pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
    """
    Build a linearly spaced quantity array from a dictionary of the form

    {'start': 5000 km/s,
     'stop': 10000 km/s,
     'num': 1000}

    Parameters
    ----------
    quantity_linspace_dictionary: ~dict
    add_one: boolean, default: True
        generate num + 1 points (useful for shell boundaries)

    Returns
    -------
    ~np.array
    """
    start_q = parse_quantity(quantity_linspace_dictionary['start'])
    stop_q = parse_quantity(quantity_linspace_dictionary['stop'])
    try:
        stop_q = stop_q.to(start_q.unit)
    except u.UnitsError:
        raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
    n_points = quantity_linspace_dictionary['num'] + (1 if add_one else 0)
    return np.linspace(start_q.value, stop_q.value, num=n_points) * start_q.unit
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
    """Convert two spectral-bin boundaries to Angstrom and return them as an
    ordered (start_wavelength, end_wavelength) pair."""
    boundary_a = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
    boundary_b = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
    return min(boundary_a, boundary_b), max(boundary_a, boundary_b)
def calculate_exponential_density(velocities, v_0, rho0):
    """
    Compute an exponential density profile.

    :math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`

    Parameters
    ----------
    velocities : ~astropy.Quantity
        Array-like velocity profile
    v_0 : ~astropy.Quantity
        reference velocity
    rho0 : ~astropy.Quantity
        reference density

    Returns
    -------
    densities : ~astropy.Quantity
    """
    return rho0 * np.exp(-velocities / v_0)
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
    """
    Compute a power-law density profile.

    :math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`

    Parameters
    ----------
    velocities : ~astropy.Quantity
        Array-like velocity profile
    velocity_0 : ~astropy.Quantity
        reference velocity
    rho_0 : ~astropy.Quantity
        reference density
    exponent : ~float
        exponent used in the power law

    Returns
    -------
    densities : ~astropy.Quantity
    """
    return rho_0 * (velocities / velocity_0) ** exponent
def parse_model_file_section(model_setup_file_dict, time_explosion):
    """
    Parse the model file section of the configuration.

    Dispatches on ``model_setup_file_dict['type']`` to a file-format
    specific parser (currently only ``artis``).

    Parameters
    ----------
    model_setup_file_dict : dict
        parsed model-file section; must contain the key ``type`` plus the
        parser-specific entries (file names, optional velocity cuts, ...)
    time_explosion : ~astropy.units.Quantity
        time since explosion, used to scale densities and decay the
        radioactive abundances

    Returns
    -------
    (v_inner, v_outer, mean_densities, abundances)
    """

    def parse_artis_model_setup_files(model_file_section_dict, time_explosion):

        ###### Reading the structure part of the ARTIS file pair
        structure_fname = model_file_section_dict['structure_fname']

        # Header: line 0 holds the shell count, line 1 the model epoch.
        # (``open`` replaces the Python-2-only ``file`` builtin.)
        for i, line in enumerate(open(structure_fname)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

        artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
                               'cr48_fraction']
        artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
                                    dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current ti
        velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
        mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
        mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)

        #Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
        else:
            raise ConfigurationError(
                'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model ** 3) * (v_outer ** 3 - v_inner ** 3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
                    no_of_shells, sum(masses.value))

        # Optional velocity cuts restrict the model to a shell range.
        if 'v_lowest' in model_file_section_dict:
            v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if 'v_highest' in model_file_section_dict:
            v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells

        artis_model = artis_model[min_shell:max_shell]
        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        ###### Reading the abundance part of the ARTIS file pair
        abundances_fname = model_file_section_dict['abundances_fname']
        # Rows are atomic numbers 1..30, columns are shells.
        abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
                                  index=np.arange(1, 31))

        # Split each element into a stable part plus the radioactive
        # fractions, then decay the radioactive fractions to time_explosion.
        # The two-exponential terms follow the standard two-step decay
        # solution for the chains Ni56 -> Co56 -> Fe, Fe52 -> Mn52 and
        # Cr48 -> V48 -> Ti.
        ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
        co_stable = abundances.ix[27] - artis_model['co56_fraction']
        fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
        mn_stable = abundances.ix[25] - 0.0
        cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
        v_stable = abundances.ix[23] - 0.0
        ti_stable = abundances.ix[22] - 0.0

        abundances.ix[28] = ni_stable
        abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
            -(time_explosion * inv_ni56_efolding_time).to(1).value)

        abundances.ix[27] = co_stable
        abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
            -(time_explosion * inv_co56_efolding_time).to(1).value)
        abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
                              (inv_ni56_efolding_time - inv_co56_efolding_time)) * \
                             (np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_ni56_efolding_time * time_explosion).to(1).value))

        abundances.ix[26] = fe_stable
        abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
            -(time_explosion * inv_fe52_efolding_time).to(1).value)
        abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
                               - artis_model['co56_fraction'] * inv_co56_efolding_time
                               + artis_model['ni56_fraction'] * inv_ni56_efolding_time
                               - artis_model['ni56_fraction'] * inv_co56_efolding_time
                               - artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
                                   -(inv_co56_efolding_time * time_explosion).to(1).value)
                               + artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
                                   -(inv_co56_efolding_time * time_explosion).to(1).value)
                               - artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
                                   -(inv_co56_efolding_time * time_explosion).to(1).value)
                               + artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
                                   -(inv_ni56_efolding_time * time_explosion).to(1).value))
                              / (inv_ni56_efolding_time - inv_co56_efolding_time))

        abundances.ix[25] = mn_stable
        abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
                              (inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
                             (np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_fe52_efolding_time * time_explosion).to(1).value))

        abundances.ix[24] = cr_stable
        abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
            -(time_explosion * inv_cr48_efolding_time).to(1).value)
        abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
                               - artis_model['fe52_fraction'] * inv_mn52_efolding_time
                               - artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
                                   -(inv_mn52_efolding_time * time_explosion).to(1).value)
                               + artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
                                   -(inv_fe52_efolding_time * time_explosion).to(1).value))
                              / (inv_fe52_efolding_time - inv_mn52_efolding_time))

        abundances.ix[23] = v_stable
        abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
                              (inv_cr48_efolding_time - inv_v48_efolding_time)) * \
                             (np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_cr48_efolding_time * time_explosion).to(1).value))

        abundances.ix[22] = ti_stable
        abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
                               - artis_model['cr48_fraction'] * inv_v48_efolding_time
                               - artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
                                   -(inv_v48_efolding_time * time_explosion).to(1).value)
                               + artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
                                   -(inv_cr48_efolding_time * time_explosion).to(1).value))
                              / (inv_cr48_efolding_time - inv_v48_efolding_time))

        # Optionally subdivide every shell into ``split_shells`` sub-shells,
        # replicating the density and abundance of the parent shell.
        if 'split_shells' in model_file_section_dict:
            split_shells = int(model_file_section_dict['split_shells'])
        else:
            split_shells = 1

        if split_shells > 1:
            logger.info('Increasing the number of shells by a factor of %s' % split_shells)
            no_of_shells = len(v_inner)
            velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
            v_inner = velocities[:-1]
            v_outer = velocities[1:]
            old_mean_densities = mean_densities
            mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
            new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
            # ``range`` replaces the Python-2-only ``xrange``.
            for i in range(split_shells):
                mean_densities[i::split_shells] = old_mean_densities
                new_abundance_data[:, i::split_shells] = abundances.values
            abundances = pd.DataFrame(new_abundance_data, index=abundances.index)

        return v_inner, v_outer, mean_densities, abundances

    model_file_section_parser = {}
    model_file_section_parser['artis'] = parse_artis_model_setup_files

    try:
        parser = model_file_section_parser[model_setup_file_dict['type']]
    except KeyError:
        # BUG FIX: the original formatted the message with
        # model_file_section_parser['type'], which itself raises KeyError,
        # and mislabeled this as the "abundance" section.
        raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
                                 (model_file_section_parser.keys(), model_setup_file_dict['type']))

    return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
    """
    Parse the density file section of the configuration.

    Dispatches on ``density_file_dict['type']`` to a file-format specific
    parser (currently only ``artis``).

    Parameters
    ----------
    density_file_dict : dict
        parsed density-file section; must contain ``type`` and ``name``
        plus optional ``v_lowest`` / ``v_highest`` velocity cuts
    time_explosion : ~astropy.units.Quantity
        time since explosion, used to scale the densities

    Returns
    -------
    (v_inner, v_outer, mean_densities, min_shell, max_shell)
    """
    density_file_parser = {}

    def parse_artis_density(density_file_dict, time_explosion):
        density_file = density_file_dict['name']

        # Header: line 0 holds the shell count, line 1 the model epoch.
        # (``open`` replaces the Python-2-only ``file`` builtin.)
        for i, line in enumerate(open(density_file)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

        velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current ti
        velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
        mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')

        mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)

        #Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
        else:
            raise ConfigurationError(
                'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model ** 3) * (v_outer ** 3 - v_inner ** 3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
                    no_of_shells, sum(masses.value))

        # Optional velocity cuts restrict the model to a shell range.
        # (The original also pre-initialized min_shell/max_shell before this
        # if/else; those assignments were dead stores and have been removed.)
        if 'v_lowest' in density_file_dict:
            v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if 'v_highest' in density_file_dict:
            v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells

        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        return v_inner, v_outer, mean_densities, min_shell, max_shell

    density_file_parser['artis'] = parse_artis_density

    try:
        parser = density_file_parser[density_file_dict['type']]
    except KeyError:
        # BUG FIX: the original message mislabeled this as the "abundance"
        # file section.
        raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
                                 (density_file_parser.keys(), density_file_dict['type']))

    return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
    """
    Parse the ``density`` subsection of the model structure.

    Dispatches on ``density_dict['type']`` to one of the analytic density
    profiles: ``uniform``, ``branch85_w7``, ``power_law`` or
    ``exponential``.

    Parameters
    ----------
    density_dict : dict
        parsed density subsection (``power_law`` / ``exponential`` consume
        their parameter keys from it via ``pop``)
    v_inner, v_outer : ~astropy.units.Quantity
        inner and outer shell velocities
    time_explosion : ~astropy.units.Quantity
        time since explosion

    Returns
    -------
    densities : ~astropy.units.Quantity
    """
    density_parser = {}

    #Parse density uniform
    def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
        # Identical density in every shell.
        return density_dict['value'].to('g cm^-3') * np.ones(len(v_inner))

    density_parser['uniform'] = parse_uniform

    #Parse density branch85 w7
    def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
        # W7 fit: a v^-7 power law, rescaled from the fit epoch to
        # time_explosion.
        shell_velocities = 0.5 * (v_inner + v_outer)
        densities = calculate_power_law_density(shell_velocities,
                                                density_dict['w7_v_0'],
                                                density_dict['w7_rho_0'], -7)
        return calculate_density_after_time(densities,
                                            density_dict['w7_time_0'],
                                            time_explosion)

    density_parser['branch85_w7'] = parse_branch85

    def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
        # NOTE: ``pop`` intentionally consumes the keys from density_dict.
        time_0 = density_dict.pop('time_0')
        rho_0 = density_dict.pop('rho_0')
        v_0 = density_dict.pop('v_0')
        exponent = density_dict.pop('exponent')
        shell_velocities = 0.5 * (v_inner + v_outer)
        densities = calculate_power_law_density(shell_velocities, v_0, rho_0,
                                                exponent)
        return calculate_density_after_time(densities, time_0, time_explosion)

    density_parser['power_law'] = parse_power_law

    def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
        time_0 = density_dict.pop('time_0')
        rho_0 = density_dict.pop('rho_0')
        v_0 = density_dict.pop('v_0')
        shell_velocities = 0.5 * (v_inner + v_outer)
        densities = calculate_exponential_density(shell_velocities, v_0, rho_0)
        return calculate_density_after_time(densities, time_0, time_explosion)

    density_parser['exponential'] = parse_exponential

    try:
        parser = density_parser[density_dict['type']]
    except KeyError:
        raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
                                 (density_parser.keys(), density_dict['type']))
    return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
    """
    Parse the abundance file section and fill ``abundances`` in place.

    Parameters
    ----------
    abundance_file_dict : dict
        parsed abundance-file section; must contain ``type`` and ``name``
    abundances : pandas.DataFrame
        abundance table to fill (rows = atomic numbers, columns = shells)
    min_shell, max_shell : int
        shell range to read from the file

    Returns
    -------
    abundances : pandas.DataFrame
    """
    abundance_file_parser = {}

    def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
        # (Removed a leftover ``#### ---- debug ----`` block that assigned an
        # unused ``time_of_model = 0.0``.)
        fname = abundance_file_dict['name']
        max_atom = 30
        logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
        # Column 0 of the file is skipped (presumably the shell index —
        # TODO confirm); the remaining columns hold atomic numbers
        # 1..max_atom per shell.
        abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
        return abundances

    abundance_file_parser['artis'] = parse_artis

    try:
        parser = abundance_file_parser[abundance_file_dict['type']]
    except KeyError:
        raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
                                 (abundance_file_parser.keys(), abundance_file_dict['type']))

    return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
    """
    Parse the supernova section

    Parameters
    ----------
    supernova_dict: dict
        YAML parsed supernova dict

    Returns
    -------
    config_dict: dict
    """
    config_dict = {}

    #parse luminosity
    # The luminosity is given either as log10(L / L_sun) ('log_lsun') or as
    # a value with an explicit unit.
    luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
    if luminosity_unit == 'log_lsun':
        log_luminosity = float(luminosity_value) + np.log10(constants.L_sun.cgs.value)
        config_dict['luminosity_requested'] = 10 ** log_luminosity * u.erg / u.s
    else:
        luminosity = float(luminosity_value) * u.Unit(luminosity_unit)
        config_dict['luminosity_requested'] = luminosity.to('erg/s')

    config_dict['time_explosion'] = parse_quantity(
        supernova_dict['time_explosion']).to('s')

    if 'distance' in supernova_dict:
        config_dict['distance'] = parse_quantity(supernova_dict['distance'])
    else:
        config_dict['distance'] = None

    # Note the inversion: the *start* wavelength maps to the *end* frequency
    # and vice versa.
    if 'luminosity_wavelength_start' in supernova_dict:
        config_dict['luminosity_nu_end'] = parse_quantity(
            supernova_dict['luminosity_wavelength_start']).to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_end'] = np.inf * u.Hz

    if 'luminosity_wavelength_end' in supernova_dict:
        config_dict['luminosity_nu_start'] = parse_quantity(
            supernova_dict['luminosity_wavelength_end']).to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_start'] = 0.0 * u.Hz

    return config_dict
def parse_spectrum_list2dict(spectrum_list):
    """
    Parse the spectrum list [start, stop, num] to a dictionary with keys
    ``start``, ``end``, ``bins`` and the precomputed ``frequency`` grid.

    Parameters
    ----------
    spectrum_list : list
        ``[start, end, bins]`` where start/end are wavelength Quantities

    Returns
    -------
    spectrum_config_dict : dict
    """
    # BUG FIX: the original used ``and`` here, so the check only fired when
    # *both* bounds had non-length units; either bound being a non-length
    # must raise.
    if spectrum_list[0].unit.physical_type != 'length' or \
            spectrum_list[1].unit.physical_type != 'length':
        raise ValueError('start and end of spectrum need to be a length')

    spectrum_config_dict = {}
    spectrum_config_dict['start'] = spectrum_list[0]
    spectrum_config_dict['end'] = spectrum_list[1]
    spectrum_config_dict['bins'] = spectrum_list[2]

    # Frequencies run from the end wavelength to the start wavelength so the
    # grid ascends in frequency; ``bins`` intervals need ``bins + 1`` points.
    spectrum_frequency = np.linspace(
        spectrum_config_dict['end'].to('Hz', u.spectral()),
        spectrum_config_dict['start'].to('Hz', u.spectral()),
        num=spectrum_config_dict['bins'] + 1)
    spectrum_config_dict['frequency'] = spectrum_frequency

    return spectrum_config_dict
def parse_convergence_section(convergence_section_dict):
    """
    Parse the convergence section dictionary.

    Each of the per-variable subsections (``t_inner``, ``t_rad``, ``w``) is
    replaced, in place, by a copy of ``global_convergence_parameters``
    overridden with the variable-specific settings.

    Parameters
    ----------
    convergence_section_dict: ~dict
        dictionary; mutated and returned
    """
    for convergence_variable in ('t_inner', 't_rad', 'w'):
        # Make sure the per-variable subsection exists at all.
        convergence_section_dict.setdefault(convergence_variable, {})
        # Global defaults first, then variable-specific overrides on top.
        merged = convergence_section_dict[
            'global_convergence_parameters'].copy()
        merged.update(convergence_section_dict[convergence_variable])
        convergence_section_dict[convergence_variable] = merged
    return convergence_section_dict
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
    """
    Generated densities from the fit to W7 in Branch 85 page 620 (citation missing)

    Parameters
    ----------
    velocities : `~numpy.ndarray`
        velocities in cm/s
    time_explosion : `float`
        time since explosion needed to descale density with expansion
    time_0 : `float`
        time in seconds of the w7 model - default 19.999, no reason to change
    density_coefficient : `float`
        coefficient for the polynomial - obtained by fitting to W7, no reason to change
    """
    # rho = C * (v [km/s])^-7, converting the input from cm/s to km/s.
    velocities_km_s = velocities * 1e-5
    w7_densities = density_coefficient * velocities_km_s ** -7
    # Rescale from the epoch of the W7 fit to the requested epoch.
    w7_densities = calculate_density_after_time(w7_densities, time_0,
                                                time_explosion)
    # The density for the first velocity entry is discarded.
    return w7_densities[1:]
class ConfigurationNameSpace(dict):
    """
    The configuration name space class allows to wrap a dictionary and adds
    utility functions for easy access. Accesses like a.b.c are then possible

    Code from http://goo.gl/KIaq8I

    Parameters
    ----------
    config_dict: ~dict
        configuration dictionary

    Returns
    -------
    config_ns: ConfigurationNameSpace
    """

    @classmethod
    def from_yaml(cls, fname):
        """
        Read a configuration from a YAML file

        Parameters
        ----------
        fname: str
            filename or path
        """
        try:
            # ``open`` replaces the Python-2-only ``file`` builtin.
            yaml_dict = yaml.load(open(fname))
        except IOError as e:
            logger.critical('No config file named: %s', fname)
            raise e
        return cls.from_config_dict(yaml_dict)

    @classmethod
    def from_config_dict(cls, config_dict, config_definition_file=None):
        """
        Validating a config file.

        Parameters
        ----------
        config_dict : ~dict
            dictionary of a raw unvalidated config file
        config_definition_file : ~str
            path to the config definition; defaults to the file that ships
            with TARDIS

        Returns
        -------
        `tardis.config_reader.Configuration`
        """
        if config_definition_file is None:
            config_definition_file = default_config_definition_file
        config_definition = yaml.load(open(config_definition_file))
        return cls(ConfigurationValidator(config_definition,
                                          config_dict).get_config())

    # Sentinel object (kept from the original implementation).
    marker = object()

    def __init__(self, value=None):
        if value is None:
            pass
        elif isinstance(value, dict):
            # Route through __setitem__ so nested dicts are wrapped too.
            for key in value:
                self.__setitem__(key, value[key])
        else:
            # BUG FIX: was the Python-2-only statement form
            # ``raise TypeError, 'expected dict'`` (a SyntaxError on
            # Python 3).
            raise TypeError('expected dict')

    def __setitem__(self, key, value):
        # Wrap plain dicts so attribute access keeps working recursively.
        if isinstance(value, dict) and not isinstance(value,
                                                      ConfigurationNameSpace):
            value = ConfigurationNameSpace(value)
        # Preserve the unit of an existing Quantity when overwriting it.
        if key in self and hasattr(self[key], 'unit'):
            value = u.Quantity(value, self[key].unit)
        dict.__setitem__(self, key, value)

    def __getitem__(self, key):
        return super(ConfigurationNameSpace, self).__getitem__(key)

    def __getattr__(self, item):
        if item in self:
            return self[item]
        else:
            # BUG FIX: the original discarded the result of
            # __getattribute__; return it (a missing attribute still raises
            # AttributeError from the call itself).
            return super(ConfigurationNameSpace, self).__getattribute__(item)

    __setattr__ = __setitem__

    def __dir__(self):
        return self.keys()

    def get_config_item(self, config_item_string):
        """
        Get configuration items using a string of type 'a.b.param'

        Parameters
        ----------
        config_item_string: ~str
            string of shape 'section1.sectionb.param1'
        """
        config_item_path = config_item_string.split('.')
        if len(config_item_path) == 1:
            config_item = config_item_path[0]
            if config_item.startswith('item'):
                return self[config_item_path[0]]
            else:
                return self[config_item]
        elif len(config_item_path) == 2 and \
                config_item_path[1].startswith('item'):
            # 'itemN' addresses the N-th element of a list-valued entry.
            return self[config_item_path[0]][
                int(config_item_path[1].replace('item', ''))]
        else:
            # Recurse into the nested namespace.
            return self[config_item_path[0]].get_config_item(
                '.'.join(config_item_path[1:]))

    def set_config_item(self, config_item_string, value):
        """
        set configuration items using a string of type 'a.b.param'

        Parameters
        ----------
        config_item_string: ~str
            string of shape 'section1.sectionb.param1'
        value:
            value to set the parameter with it
        """
        config_item_path = config_item_string.split('.')
        if len(config_item_path) == 1:
            self[config_item_path[0]] = value
        elif len(config_item_path) == 2 and \
                config_item_path[1].startswith('item'):
            current_value = self[config_item_path[0]][
                int(config_item_path[1].replace('item', ''))]
            if hasattr(current_value, 'unit'):
                # Keep the existing unit when replacing a Quantity element.
                self[config_item_path[0]][
                    int(config_item_path[1].replace('item', ''))] = \
                    u.Quantity(value, current_value.unit)
            else:
                self[config_item_path[0]][
                    int(config_item_path[1].replace('item', ''))] = value
        else:
            self[config_item_path[0]].set_config_item(
                '.'.join(config_item_path[1:]), value)

    def deepcopy(self):
        return ConfigurationNameSpace(copy.deepcopy(dict(self)))
class Configuration(ConfigurationNameSpace):
    """
    Tardis configuration class
    """

    @classmethod
    def from_yaml(cls, fname, test_parser=False):
        """
        Read a TARDIS YAML configuration file and parse it.

        Parameters
        ----------
        fname : str
            path to the YAML configuration file
        test_parser : ~bool
            passed through to `from_config_dict`
        """
        try:
            yaml_dict = yaml.load(open(fname))
        except IOError as e:
            logger.critical('No config file named: %s', fname)
            raise e
        # Only one configuration format version is understood.
        tardis_config_version = yaml_dict.get('tardis_config_version', None)
        if tardis_config_version != 'v1.0':
            raise ConfigurationError('Currently only tardis_config_version v1.0 supported')

        return cls.from_config_dict(yaml_dict, test_parser=test_parser)

    @classmethod
    def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
                         config_definition_file=None, validate=True):
        """
        Validating and subsequently parsing a config file.

        Parameters
        ----------
        config_dict : ~dict
            dictionary of a raw unvalidated config file
        atom_data: ~tardis.atomic.AtomData
            atom data object. if `None` will be tried to be read from
            atom data file path in the config_dict [default=None]
        test_parser: ~bool
            switch on to ignore a working atom_data, mainly useful for
            testing this reader
        config_definition_file: ~str
            path to config definition file, if `None` will be set to the default
            in the `data` directory that ships with TARDIS
        validate: ~bool
            Turn validation on or off.

        Returns
        -------
        `tardis.config_reader.Configuration`
        """
        if config_definition_file is None:
            config_definition_file = default_config_definition_file

        config_definition = yaml.load(open(config_definition_file))
        if validate:
            validated_config_dict = ConfigurationValidator(
                config_definition, config_dict).get_config()
        else:
            validated_config_dict = config_dict

        #First let's see if we can find an atom_db anywhere:
        if test_parser:
            atom_data = None
        elif 'atom_data' in validated_config_dict.keys():
            atom_data_fname = validated_config_dict['atom_data']
            validated_config_dict['atom_data_fname'] = atom_data_fname
        else:
            raise ConfigurationError('No atom_data key found in config or command line')

        if atom_data is None and not test_parser:
            logger.info('Reading Atomic Data from %s', atom_data_fname)
            atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
        # (Removed a no-op ``else: atom_data = atom_data`` self-assignment.)

        #Parsing supernova dictionary
        # The spectral range is given in wavelengths; converting to
        # frequency swaps start and end.
        validated_config_dict['supernova']['luminosity_nu_start'] = \
            validated_config_dict['supernova']['luminosity_wavelength_end'].to(
                u.Hz, u.spectral())
        try:
            validated_config_dict['supernova']['luminosity_nu_end'] = \
                (validated_config_dict['supernova']
                 ['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
        except ZeroDivisionError:
            # A wavelength of zero corresponds to an infinite frequency.
            validated_config_dict['supernova']['luminosity_nu_end'] = (
                np.inf * u.Hz)

        validated_config_dict['supernova']['time_explosion'] = (
            validated_config_dict['supernova']['time_explosion'].cgs)

        validated_config_dict['supernova']['luminosity_requested'] = (
            validated_config_dict['supernova']['luminosity_requested'].cgs)

        #Parsing the model section
        model_section = validated_config_dict['model']
        v_inner = None
        v_outer = None
        mean_densities = None
        abundances = None

        structure_section = model_section['structure']

        if structure_section['type'] == 'specific':
            # Velocity grid given explicitly as [start, stop, num].
            start, stop, num = model_section['structure']['velocity']
            num += 1
            velocities = np.linspace(start, stop, num)
            v_inner, v_outer = velocities[:-1], velocities[1:]
            mean_densities = parse_density_section(
                model_section['structure']['density'], v_inner, v_outer,
                validated_config_dict['supernova']['time_explosion']).cgs

        elif structure_section['type'] == 'file':
            v_inner, v_outer, mean_densities, inner_boundary_index, \
                outer_boundary_index = read_density_file(
                    structure_section['filename'],
                    structure_section['filetype'],
                    validated_config_dict['supernova']['time_explosion'],
                    structure_section['v_inner_boundary'],
                    structure_section['v_outer_boundary'])

        # Derived geometry: radii follow from homologous expansion
        # r = v * t_explosion.
        r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
        r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
        r_middle = 0.5 * (r_inner + r_outer)

        # (Removed an unused ``structure_validated_config_dict = {}``
        # assignment.)
        structure_section['v_inner'] = v_inner.cgs
        structure_section['v_outer'] = v_outer.cgs
        structure_section['mean_densities'] = mean_densities.cgs
        no_of_shells = len(v_inner)
        structure_section['no_of_shells'] = no_of_shells
        structure_section['r_inner'] = r_inner.cgs
        structure_section['r_outer'] = r_outer.cgs
        structure_section['r_middle'] = r_middle.cgs
        structure_section['volumes'] = ((4. / 3) * np.pi *
                                        (r_outer ** 3 -
                                         r_inner ** 3)).cgs

        #### TODO the following is legacy code and should be removed
        validated_config_dict['structure'] = \
            validated_config_dict['model']['structure']
        # ^^^^^^^^^^^^^^^^

        abundances_section = model_section['abundances']

        if abundances_section['type'] == 'uniform':
            abundances = pd.DataFrame(columns=np.arange(no_of_shells),
                                      index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
            for element_symbol_string in abundances_section:
                if element_symbol_string == 'type':
                    continue
                z = element_symbol2atomic_number(element_symbol_string)
                abundances.ix[z] = float(abundances_section[element_symbol_string])

        elif abundances_section['type'] == 'file':
            index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
                                                     inner_boundary_index, outer_boundary_index)
            if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified has not the same number of cells'
                                         'as the specified density profile')

        # Drop NaNs and all-zero rows, then renormalize each shell to 1.
        abundances = abundances.replace(np.nan, 0.0)
        abundances = abundances[abundances.sum(axis=1) > 0]

        norm_factor = abundances.sum(axis=0)

        if np.any(np.abs(norm_factor - 1) > 1e-12):
            logger.warning("Abundances have not been normalized to 1. - normalizing")
            abundances /= norm_factor

        validated_config_dict['abundances'] = abundances

        ########### DOING PLASMA SECTION ###############
        plasma_section = validated_config_dict['plasma']

        if plasma_section['initial_t_inner'] < 0.0 * u.K:
            # Derive the inner temperature from the requested luminosity via
            # the Stefan-Boltzmann law applied at the inner radius.
            luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
            plasma_section['t_inner'] = ((luminosity_requested /
                                          (4 * np.pi * r_inner[0] ** 2 *
                                           constants.sigma_sb)) ** .25).to('K')
            logger.info('"initial_t_inner" is not specified in the plasma '
                        'section - initializing to %s with given luminosity',
                        plasma_section['t_inner'])
        else:
            plasma_section['t_inner'] = plasma_section['initial_t_inner']

        plasma_section['t_rads'] = np.ones(no_of_shells) * \
            plasma_section['initial_t_rad']

        if plasma_section['disable_electron_scattering'] is False:
            logger.debug("Electron scattering switched on")
            validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
        else:
            logger.warn('Disabling electron scattering - this is not physical')
            # An effectively-zero cross-section disables scattering.
            validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)

        ##### NLTE subsection of Plasma start
        nlte_validated_config_dict = {}
        nlte_species = []
        nlte_section = plasma_section['nlte']

        nlte_species_list = nlte_section.pop('species')
        for species_string in nlte_species_list:
            nlte_species.append(species_string_to_tuple(species_string))

        nlte_validated_config_dict['species'] = nlte_species
        nlte_validated_config_dict['species_string'] = nlte_species_list
        nlte_validated_config_dict.update(nlte_section)

        if 'coronal_approximation' not in nlte_section:
            logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
            nlte_validated_config_dict['coronal_approximation'] = False

        if 'classical_nebular' not in nlte_section:
            logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
            nlte_validated_config_dict['classical_nebular'] = False
        elif nlte_section:  # checks that the dictionary is not empty
            logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
                        pp.pformat(nlte_section))

        if not nlte_validated_config_dict:
            nlte_validated_config_dict['species'] = []

        plasma_section['nlte'] = nlte_validated_config_dict

        #^^^^^^^^^^^^^^ End of Plasma Section

        ##### Monte Carlo Section
        montecarlo_section = validated_config_dict['montecarlo']
        if montecarlo_section['last_no_of_packets'] < 0:
            montecarlo_section['last_no_of_packets'] = \
                montecarlo_section['no_of_packets']

        default_convergence_section = {'type': 'damped',
                                       'lock_t_inner_cyles': 1,
                                       't_inner_update_exponent': -0.5,
                                       'global_convergence_parameters': {
                                           'damping_constant': 0.5}}

        if montecarlo_section['convergence_strategy'] is None:
            logger.warning('No convergence criteria selected - just damping by 0.5 for w, t_rad and t_inner')
            montecarlo_section['convergence_strategy'] = default_convergence_section
        else:
            # BUG FIX: a leftover ``1/0`` debugging statement used to make
            # this branch raise ZeroDivisionError for every user-supplied
            # convergence strategy, leaving the parse call unreachable.
            montecarlo_section['convergence_strategy'] = parse_convergence_section(
                montecarlo_section['convergence_strategy'])

        black_body_section = montecarlo_section['black_body_sampling']
        montecarlo_section['black_body_sampling'] = {}
        montecarlo_section['black_body_sampling']['start'] = \
            black_body_section[0]
        montecarlo_section['black_body_sampling']['end'] = \
            black_body_section[1]
        montecarlo_section['black_body_sampling']['samples'] = \
            black_body_section[2]

        ###### END of convergence section reading

        validated_config_dict['spectrum'] = parse_spectrum_list2dict(
            validated_config_dict['spectrum'])

        return cls(validated_config_dict, atom_data)

    def __init__(self, config_dict, atom_data):
        super(Configuration, self).__init__(config_dict)
        self.atom_data = atom_data
        selected_atomic_numbers = self.abundances.index
        if atom_data is not None:
            # Convert mass densities to per-element number densities using
            # the atomic masses.
            self.number_densities = (self.abundances *
                                     self.structure.mean_densities.to('g/cm^3').value)
            self.number_densities = self.number_densities.div(
                self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
                axis=0)
        else:
            logger.critical('atom_data is None, only sensible for testing the parser')
| bsd-3-clause |
cmoutard/mne-python | mne/coreg.py | 5 | 38814 | """Coregistration between different coordinate frames"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
import fnmatch
from glob import glob, iglob
import os
import stat
import sys
import re
import shutil
from warnings import warn
from functools import reduce
import numpy as np
from numpy import dot
from .io import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import read_surface, write_surface
from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from .externals.six.moves import zip
# some path templates
# ``{subjects_dir}``, ``{subject}`` and the other fields are left as
# ``str.format`` placeholders to be filled in by the coregistration helpers.
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
# pformat (helper from .utils) presumably performs a partial format filling
# only the ``name`` field here -- TODO confirm against .utils.pformat
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
def _make_writable(fname):
os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
def _make_writable_recursive(path):
    """Make every file and directory under ``path`` writable.

    No-op on Windows, where POSIX permission bits can't be set safely.
    """
    if sys.platform.startswith('win'):
        return  # can't safely set perms
    # Bottom-up walk (topdown=False); entries are visited deepest-first.
    for root, dirs, files in os.walk(path, topdown=False):
        for entry in dirs + files:
            _make_writable(os.path.join(root, entry))
def create_default_subject(mne_root=None, fs_home=None, update=False,
                           subjects_dir=None):
    """Create an average brain subject for subjects without structural MRI

    Create a copy of fsaverage from the Freesurfer directory in subjects_dir
    and add auxiliary files from the mne package.

    Parameters
    ----------
    mne_root : None | str
        The mne root directory (only needed if MNE_ROOT is not specified as
        environment variable).
    fs_home : None | str
        The freesurfer home directory (only needed if FREESURFER_HOME is not
        specified as environment variable).
    update : bool
        In cases where a copy of the fsaverage brain already exists in the
        subjects_dir, this option allows to only copy files that don't already
        exist in the fsaverage directory.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable
        (os.environ['SUBJECTS_DIR']) as destination for the new subject.

    Notes
    -----
    When no structural MRI is available for a subject, an average brain can be
    substituted. Freesurfer comes with such an average brain model, and MNE
    comes with some auxiliary files which make coregistration easier.
    :py:func:`create_default_subject` copies the relevant files from Freesurfer
    into the current subjects_dir, and also adds the auxiliary files provided
    by MNE.

    The files provided by MNE are listed below and can be found under
    ``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
    section 7.19 Working with the average brain):

    fsaverage_head.fif:
        The approximate head surface triangulation for fsaverage.
    fsaverage_inner_skull-bem.fif:
        The approximate inner skull surface for fsaverage.
    fsaverage-fiducials.fif:
        The locations of the fiducial points (LPA, RPA, and nasion).
    fsaverage-trans.fif:
        Contains a default MEG-MRI coordinate transformation suitable for
        fsaverage.
    """
    # Resolve directories: explicit arguments take precedence, environment
    # variables (via get_config) are the fallback.
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    if fs_home is None:
        fs_home = get_config('FREESURFER_HOME', fs_home)
        if fs_home is None:
            raise ValueError(
                "FREESURFER_HOME environment variable not found. Please "
                "specify the fs_home parameter in your call to "
                "create_default_subject().")
    if mne_root is None:
        mne_root = get_config('MNE_ROOT', mne_root)
        if mne_root is None:
            raise ValueError("MNE_ROOT environment variable not found. Please "
                             "specify the mne_root parameter in your call to "
                             "create_default_subject().")

    # make sure freesurfer files exist
    fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
    if not os.path.exists(fs_src):
        raise IOError('fsaverage not found at %r. Is fs_home specified '
                      'correctly?' % fs_src)
    for name in ('label', 'mri', 'surf'):
        dirname = os.path.join(fs_src, name)
        if not os.path.isdir(dirname):
            raise IOError("Freesurfer fsaverage seems to be incomplete: No "
                          "directory named %s found in %s" % (name, fs_src))

    # make sure destination does not already exist
    dest = os.path.join(subjects_dir, 'fsaverage')
    if dest == fs_src:
        # Refuse to copy fsaverage onto itself.
        raise IOError(
            "Your subjects_dir points to the freesurfer subjects_dir (%r). "
            "The default subject can not be created in the freesurfer "
            "installation directory; please specify a different "
            "subjects_dir." % subjects_dir)
    elif (not update) and os.path.exists(dest):
        raise IOError(
            "Can not create fsaverage because %r already exists in "
            "subjects_dir %r. Delete or rename the existing fsaverage "
            "subject folder." % ('fsaverage', subjects_dir))

    # make sure mne files exist
    mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
                             'fsaverage', 'fsaverage-%s.fif')
    mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
    for name in mne_files:
        fname = mne_fname % name
        if not os.path.isfile(fname):
            raise IOError("MNE fsaverage incomplete: %s file not found at "
                          "%s" % (name, fname))

    # copy fsaverage from freesurfer
    logger.info("Copying fsaverage subject from freesurfer directory...")
    if (not update) or not os.path.exists(dest):
        shutil.copytree(fs_src, dest)
        # Freesurfer trees can be read-only; make the copy writable so the
        # auxiliary files can be added below.
        _make_writable_recursive(dest)

    # add files from mne
    dest_bem = os.path.join(dest, 'bem')
    if not os.path.exists(dest_bem):
        os.mkdir(dest_bem)
    logger.info("Copying auxiliary fsaverage files from mne directory...")
    dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
    _make_writable_recursive(dest_bem)
    for name in mne_files:
        # Only copy files that are not already present (supports update=True).
        if not os.path.exists(dest_fname % name):
            shutil.copy(mne_fname % name, dest_bem)
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
from scipy.spatial.distance import cdist
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
def _trans_from_params(param_info, params):
    """Convert transformation parameters into a transformation matrix.

    Parameters
    ----------
    param_info : tuple, len = 3
        Tuple describing the parameters in x (do_translate, do_rotate,
        do_scale).
    params : tuple
        The transformation parameters.

    Returns
    -------
    trans : array, shape = (4, 4)
        Transformation matrix.
    """
    do_rotate, do_translate, do_scale = param_info
    idx = 0
    mats = []
    if do_rotate:
        mats.append(rotation(*params[:3]))
        idx += 3
    if do_translate:
        # Translation is applied last, so its matrix goes to the front of
        # the product chain.
        mats.insert(0, translation(*params[idx:idx + 3]))
        idx += 3
    if do_scale == 1:
        uniform = params[idx]
        mats.append(scaling(uniform, uniform, uniform))
    elif do_scale == 3:
        mats.append(scaling(*params[idx:idx + 3]))
    # Multiply the matrices left-to-right into a single 4x4 transform.
    return reduce(dot, mats)
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
                       scale=False, tol=None, x0=None, out='trans'):
    """Find a transform that minimizes the squared distance between two
    matching sets of points.

    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
    a combination of rotation, translation, and scaling (in that order).

    Parameters
    ----------
    src_pts : array, shape = (n, 3)
        Points to which the transform should be applied.
    tgt_pts : array, shape = (n, 3)
        Points to which src_pts should be fitted. Each point in tgt_pts should
        correspond to the point in src_pts with the same index.
    rotate : bool
        Allow rotation of the ``src_pts``.
    translate : bool
        Allow translation of the ``src_pts``.
    scale : bool
        Number of scaling parameters. With False, points are not scaled. With
        True, points are scaled by the same factor along all axes.
    tol : scalar | None
        The error tolerance. If the distance between any of the matched points
        exceeds this value in the solution, a RuntimeError is raised. With
        None, no error check is performed.
    x0 : None | tuple
        Initial values for the fit parameters.
    out : 'params' | 'trans'
        In what format to return the estimate: 'params' returns a tuple with
        the fit parameters; 'trans' returns a transformation matrix of shape
        (4, 4).

    Returns
    -------
    One of the following, depending on the ``out`` parameter:

    trans : array, shape = (4, 4)
        Transformation that, if applied to src_pts, minimizes the squared
        distance to tgt_pts.
    params : array, shape = (n_params, )
        A single array containing the rotation, translation and scaling
        parameters in that order (the order consumed by
        ``_trans_from_params``).
    """
    from scipy.optimize import leastsq
    src_pts = np.atleast_2d(src_pts)
    tgt_pts = np.atleast_2d(tgt_pts)
    if src_pts.shape != tgt_pts.shape:
        raise ValueError("src_pts and tgt_pts must have same shape (got "
                         "{0}, {1})".format(src_pts.shape, tgt_pts.shape))
    rotate = bool(rotate)
    translate = bool(translate)
    scale = int(scale)
    if translate:
        # append a homogeneous coordinate so a 4x4 matrix (rotation plus
        # translation) can be applied with a single dot product
        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
    # a specialized residual function is defined per parameter combination;
    # leastsq minimizes the sum of its squared entries
    param_info = (rotate, translate, scale)
    if param_info == (True, False, 0):
        def error(x):
            rx, ry, rz = x
            trans = rotation3d(rx, ry, rz)
            est = dot(src_pts, trans.T)
            return (tgt_pts - est).ravel()
        if x0 is None:
            x0 = (0, 0, 0)
    elif param_info == (True, False, 1):
        def error(x):
            rx, ry, rz, s = x
            # uniform scaling folded into the 3x3 rotation matrix
            trans = rotation3d(rx, ry, rz) * s
            est = dot(src_pts, trans.T)
            return (tgt_pts - est).ravel()
        if x0 is None:
            x0 = (0, 0, 0, 1)
    elif param_info == (True, True, 0):
        def error(x):
            rx, ry, rz, tx, ty, tz = x
            trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
            est = dot(src_pts, trans.T)
            # drop the homogeneous coordinate before comparing
            return (tgt_pts - est[:, :3]).ravel()
        if x0 is None:
            x0 = (0, 0, 0, 0, 0, 0)
    elif param_info == (True, True, 1):
        def error(x):
            rx, ry, rz, tx, ty, tz, s = x
            trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
                                 scaling(s, s, s)))
            est = dot(src_pts, trans.T)
            return (tgt_pts - est[:, :3]).ravel()
        if x0 is None:
            x0 = (0, 0, 0, 0, 0, 0, 1)
    else:
        raise NotImplementedError(
            "The specified parameter combination is not implemented: "
            "rotate=%r, translate=%r, scale=%r" % param_info)
    x, _, _, _, _ = leastsq(error, x0, full_output=True)
    # re-create the final transformation matrix
    if (tol is not None) or (out == 'trans'):
        trans = _trans_from_params(param_info, x)
    # assess the error of the solution
    if tol is not None:
        if not translate:
            # the homogeneous coordinate was not appended above in this case
            src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
        est_pts = dot(src_pts, trans.T)[:, :3]
        err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
        if np.any(err > tol):
            raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
    if out == 'params':
        return x
    elif out == 'trans':
        return trans
    else:
        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
                         "'trans'." % out)
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
from scipy.spatial.distance import cdist
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
                    scale=0, x0=None, leastsq_args=None, out='params'):
    """Find a transform that minimizes the squared distance from each source
    point to its closest target point.

    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
    a combination of rotation, translation, and scaling (in that order).

    Parameters
    ----------
    src_pts : array, shape = (n, 3)
        Points to which the transform should be applied.
    tgt_pts : array, shape = (m, 3)
        Points to which src_pts should be fitted. Each point in tgt_pts should
        correspond to the point in src_pts with the same index.
    rotate : bool
        Allow rotation of the ``src_pts``.
    translate : bool
        Allow translation of the ``src_pts``.
    scale : 0 | 1 | 3
        Number of scaling parameters. With 0, points are not scaled. With 1,
        points are scaled by the same factor along all axes. With 3, points are
        scaled by a separate factor along each axis.
    x0 : None | tuple
        Initial values for the fit parameters.
    leastsq_args : dict | None
        Additional parameters to submit to :func:`scipy.optimize.leastsq`.
        With None (default), no extra arguments are passed. (Previously a
        mutable ``{}`` default; changed to None to avoid the shared mutable
        default anti-pattern, with identical behavior.)
    out : 'params' | 'trans'
        In what format to return the estimate: 'params' returns a tuple with
        the fit parameters; 'trans' returns a transformation matrix of shape
        (4, 4).

    Returns
    -------
    x : array, shape = (n_params, )
        Estimated parameters for the transformation.

    Notes
    -----
    Assumes that the target points form a dense enough point cloud so that
    the distance of each src_pt to the closest tgt_pt can be used as an
    estimate of the distance of src_pt to tgt_pts.
    """
    from scipy.optimize import leastsq
    kwargs = {'epsfcn': 0.01}
    if leastsq_args is not None:
        kwargs.update(leastsq_args)

    # assert correct argument types
    src_pts = np.atleast_2d(src_pts)
    tgt_pts = np.atleast_2d(tgt_pts)
    translate = bool(translate)
    rotate = bool(rotate)
    scale = int(scale)

    if translate:
        # homogeneous coordinate so a 4x4 matrix can translate the points
        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))

    try:
        from sklearn.neighbors import BallTree
        tgt_pts = BallTree(tgt_pts)
        errfunc = _point_cloud_error_balltree
    except ImportError:
        warn("Sklearn could not be imported. Fitting points will be slower. "
             "To improve performance, install the sklearn module.")
        errfunc = _point_cloud_error

    # for efficiency, define parameter specific error function
    param_info = (rotate, translate, scale)
    if param_info == (True, False, 0):
        x0 = x0 or (0, 0, 0)

        def error(x):
            rx, ry, rz = x
            trans = rotation3d(rx, ry, rz)
            est = dot(src_pts, trans.T)
            err = errfunc(est, tgt_pts)
            return err
    elif param_info == (True, False, 1):
        x0 = x0 or (0, 0, 0, 1)

        def error(x):
            rx, ry, rz, s = x
            # uniform scale folded into the 3x3 rotation matrix
            trans = rotation3d(rx, ry, rz) * s
            est = dot(src_pts, trans.T)
            err = errfunc(est, tgt_pts)
            return err
    elif param_info == (True, False, 3):
        x0 = x0 or (0, 0, 0, 1, 1, 1)

        def error(x):
            rx, ry, rz, sx, sy, sz = x
            # per-axis scale broadcast over the rotation matrix columns
            trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
            est = dot(src_pts, trans.T)
            err = errfunc(est, tgt_pts)
            return err
    elif param_info == (True, True, 0):
        x0 = x0 or (0, 0, 0, 0, 0, 0)

        def error(x):
            rx, ry, rz, tx, ty, tz = x
            trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
            est = dot(src_pts, trans.T)
            # drop the homogeneous coordinate before the distance lookup
            err = errfunc(est[:, :3], tgt_pts)
            return err
    else:
        raise NotImplementedError(
            "The specified parameter combination is not implemented: "
            "rotate=%r, translate=%r, scale=%r" % param_info)

    est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
    logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
                 msg)

    if out == 'params':
        return est
    elif out == 'trans':
        return _trans_from_params(param_info, est)
    else:
        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
                         "'trans'." % out)
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
    """Find paths to label files in a subject's label directory.

    Parameters
    ----------
    subject : str
        Name of the mri subject.
    pattern : str | None
        Pattern for finding the labels relative to the label directory in the
        MRI subject directory (e.g., "aparc/*.label" will find all labels
        in the "subject/label/aparc" directory). With None, find all labels.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable
        (sys.environ['SUBJECTS_DIR'])

    Returns
    -------
    paths : list
        List of paths relative to the subject's label directory.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    subject_dir = os.path.join(subjects_dir, subject)
    lbl_dir = os.path.join(subject_dir, 'label')
    if pattern is None:
        paths = []
        for dirpath, _, filenames in os.walk(lbl_dir):
            rel_dir = os.path.relpath(dirpath, lbl_dir)
            for filename in fnmatch.filter(filenames, '*.label'):
                paths.append(os.path.join(rel_dir, filename))
    else:
        # ``pattern`` is documented as relative to the label directory, so
        # anchor it there before globbing; a bare ``iglob(pattern)`` would
        # match relative to the current working directory instead.
        paths = [os.path.relpath(path, lbl_dir)
                 for path in iglob(os.path.join(lbl_dir, pattern))]
    return paths
def _find_mri_paths(subject='fsaverage', subjects_dir=None):
    """Find all files of an mri relevant for source transformation.

    Parameters
    ----------
    subject : str
        Name of the mri subject.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable
        (sys.environ['SUBJECTS_DIR'])

    Returns
    -------
    paths : dict
        Dictionary whose keys are relevant file type names (str), and whose
        values are lists of paths.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    paths = {}
    # directories to create
    paths['dirs'] = [bem_dirname, surf_dirname]
    # surf/ files
    paths['surf'] = surf = []
    surf_fname = os.path.join(surf_dirname, '{name}')
    surf_names = ('inflated', 'sphere', 'sphere.reg', 'white')
    # tests can opt out of copying the full surface list via this env var
    if os.getenv('_MNE_FEW_SURFACES', '') != 'true':  # for testing
        surf_names = surf_names + (
            'orig', 'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
            'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
    for name in surf_names:
        for hemi in ('lh.', 'rh.'):
            fname = pformat(surf_fname, name=hemi + name)
            surf.append(fname)
    # BEM files
    paths['bem'] = bem = []
    path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
    if os.path.exists(path):
        bem.append('head')
    bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
                          subject=subject, name='*-bem')
    re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
                         name='(.+)')
    # extract the bare bem "name" component from each matching file path
    for path in iglob(bem_pattern):
        match = re.match(re_pattern, path)
        name = match.group(1)
        bem.append(name)
    # fiducials
    paths['fid'] = [fid_fname]
    # duplicate curvature files (copied unmodified, not scaled)
    paths['duplicate'] = dup = []
    path = os.path.join(surf_dirname, '{name}')
    for name in ['lh.curv', 'rh.curv']:
        fname = pformat(path, name=name)
        dup.append(fname)
    # check presence of required files
    for ftype in ['surf', 'fid', 'duplicate']:
        for fname in paths[ftype]:
            path = fname.format(subjects_dir=subjects_dir, subject=subject)
            # resolve symlinks before checking existence
            path = os.path.realpath(path)
            if not os.path.exists(path):
                raise IOError("Required file not found: %r" % path)
    # find source space files
    paths['src'] = src = []
    bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
    fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
    prefix = subject + '-'
    for fname in fnames:
        if fname.startswith(prefix):
            # re-insert the "{subject}" placeholder so the template can later
            # be formatted for a different (scaled) subject
            fname = "{subject}-%s" % fname[len(prefix):]
        path = os.path.join(bem_dirname, fname)
        src.append(path)
    return paths
def _is_mri_subject(subject, subjects_dir=None):
    """Check whether a directory in subjects_dir is an mri subject directory.

    Parameters
    ----------
    subject : str
        Name of the potential subject/directory.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.

    Returns
    -------
    is_mri_subject : bool
        Whether ``subject`` is an mri subject.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # A directory counts as an MRI subject if its head BEM file exists.
    head_path = head_bem_fname.format(subjects_dir=subjects_dir,
                                      subject=subject)
    return os.path.exists(head_path)
def _mri_subject_has_bem(subject, subjects_dir=None):
    """Check whether an mri subject has a file matching the bem pattern.

    Parameters
    ----------
    subject : str
        Name of the subject.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.

    Returns
    -------
    has_bem_file : bool
        Whether ``subject`` has a bem file.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    bem_pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
                                   name='*-bem')
    # True as soon as at least one file matches the pattern.
    return len(glob(bem_pattern)) > 0
def read_mri_cfg(subject, subjects_dir=None):
    """Read information from the cfg file of a scaled MRI brain.

    Parameters
    ----------
    subject : str
        Name of the scaled MRI subject.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.

    Returns
    -------
    cfg : dict
        Dictionary with entries from the MRI's cfg file.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
    if not os.path.exists(fname):
        raise IOError("%r does not seem to be a scaled mri subject: %r does "
                      "not exist." % (subject, fname))
    logger.info("Reading MRI cfg file %s" % fname)
    config = configparser.RawConfigParser()
    config.read(fname)
    n_params = config.getint("MRI Scaling", 'n_params')
    # The scale entry is a single float for uniform scaling (n_params == 1)
    # or three space-separated floats for per-axis scaling (n_params == 3).
    if n_params == 1:
        scale = config.getfloat("MRI Scaling", 'scale')
    elif n_params == 3:
        scale = np.array([float(s) for s in
                          config.get("MRI Scaling", 'scale').split()])
    else:
        raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
    return {'subject_from': config.get("MRI Scaling", 'subject_from'),
            'n_params': n_params, 'scale': scale}
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
def _scale_params(subject_to, subject_from, scale, subjects_dir):
    """Resolve scaling parameters, reading subject_to's cfg file if needed.

    Parameters
    ----------
    subject_to : str
        Name of the scaled MRI subject.
    subject_from : None | str
        Name of the original MRI subject; with None, read from subject_to's
        cfg file (then ``scale`` must be None as well).
    scale : None | float | array, shape = (3,)
        Scaling factor; with None, read from subject_to's cfg file.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.

    Returns
    -------
    subjects_dir : str
        Resolved subjects directory.
    subject_from : str
        Resolved source subject name.
    n_params : int
        Number of scaling parameters (1 or 3).
    scale : float | array, shape = (3,)
        Resolved scaling factor.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # subject_from and scale must be supplied together or not at all
    if (subject_from is None) != (scale is None):
        raise TypeError("Need to provide either both subject_from and scale "
                        "parameters, or neither.")
    if subject_from is None:
        cfg = read_mri_cfg(subject_to, subjects_dir)
        subject_from = cfg['subject_from']
        n_params = cfg['n_params']
        scale = cfg['scale']
    else:
        scale = np.asarray(scale)
        if scale.ndim == 0:
            n_params = 1
        elif scale.shape == (3,):
            n_params = 3
        else:
            # typo fixed in the original message ("parameer" -> "parameter")
            raise ValueError("Invalid shape for scale parameter. Need scalar "
                             "or array of length 3. Got %s." % str(scale))
    return subjects_dir, subject_from, n_params, scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
              subjects_dir=None):
    """Scale a bem file.

    Parameters
    ----------
    subject_to : str
        Name of the scaled MRI subject (the destination mri subject).
    bem_name : str
        Name of the bem file. For example, to scale
        ``fsaverage-inner_skull-bem.fif``, the bem_name would be
        "inner_skull-bem".
    subject_from : None | str
        The subject from which to read the source space. If None, subject_from
        is read from subject_to's config file.
    scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subjects_from is specified,
        otherwise it is read from subject_to's config file.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.
    """
    subjects_dir, subject_from, _, scale = _scale_params(subject_to,
                                                         subject_from, scale,
                                                         subjects_dir)
    src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
                           name=bem_name)
    dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
                           name=bem_name)
    if os.path.exists(dst):
        # never silently overwrite (typo fixed: "alredy" -> "already")
        raise IOError("File already exists: %s" % dst)
    surfs = read_bem_surfaces(src)
    if len(surfs) != 1:
        raise NotImplementedError("BEM file with more than one surface: %r"
                                  % src)
    surf0 = surfs[0]
    # scale the vertex coordinates; triangulation is unchanged
    surf0['rr'] = surf0['rr'] * scale
    write_bem_surfaces(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
                 scale=None, subjects_dir=None):
    """Scale labels to match a brain that was previously created by scaling.

    Parameters
    ----------
    subject_to : str
        Name of the scaled MRI subject (the destination brain).
    pattern : str | None
        Pattern for finding the labels relative to the label directory in the
        MRI subject directory (e.g., "lh.BA3a.label" will scale
        "fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
        in the "fsaverage/label/aparc" directory). With None, scale all labels.
    overwrite : bool
        Overwrite any label file that already exists for subject_to (otherwise
        existsing labels are skipped).
    subject_from : None | str
        Name of the original MRI subject (the brain that was scaled to create
        subject_to). If None, the value is read from subject_to's cfg file.
    scale : None | float | array_like, shape = (3,)
        Scaling parameter. If None, the value is read from subject_to's cfg
        file.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.
    """
    # fill in missing parameters from the scaled subject's cfg file
    if scale is None or subject_from is None:
        cfg = read_mri_cfg(subject_to, subjects_dir)
        if subject_from is None:
            subject_from = cfg['subject_from']
        if scale is None:
            scale = cfg['scale']

    # locate the labels of the source subject
    paths = _find_label_paths(subject_from, pattern, subjects_dir)
    if not paths:
        return

    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    src_root = os.path.join(subjects_dir, subject_from, 'label')
    dst_root = os.path.join(subjects_dir, subject_to, 'label')

    # scale each label and save it under the destination subject
    for rel_path in paths:
        dst = os.path.join(dst_root, rel_path)
        if not overwrite and os.path.exists(dst):
            continue
        dst_dir = os.path.dirname(dst)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        old_label = read_label(os.path.join(src_root, rel_path))
        new_label = Label(old_label.vertices, old_label.pos * scale,
                          old_label.values, old_label.hemi,
                          old_label.comment, subject=subject_to)
        new_label.save(dst)
def scale_mri(subject_from, subject_to, scale, overwrite=False,
              subjects_dir=None):
    """Create a scaled copy of an MRI subject.

    Parameters
    ----------
    subject_from : str
        Name of the subject providing the MRI.
    subject_to : str
        New subject name for which to save the scaled MRI.
    scale : float | array_like, shape = (3,)
        The scaling factor (one or 3 parameters).
    overwrite : bool
        If an MRI already exists for subject_to, overwrite it.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.

    See Also
    --------
    scale_labels : add labels to a scaled MRI
    scale_source_space : add a source space to a scaled MRI
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
    scale = np.asarray(scale)
    # make sure we have an empty target directory
    dest = subject_dirname.format(subject=subject_to,
                                  subjects_dir=subjects_dir)
    if os.path.exists(dest):
        if overwrite:
            shutil.rmtree(dest)
        else:
            raise IOError("Subject directory for %s already exists: %r"
                          % (subject_to, dest))
    for dirname in paths['dirs']:
        dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
        os.makedirs(dir_)
    # save MRI scaling parameters so scale_labels/scale_source_space can
    # later recover subject_from and scale via read_mri_cfg
    fname = os.path.join(dest, 'MRI scaling parameters.cfg')
    _write_mri_config(fname, subject_from, subject_to, scale)
    # surf files [in mm]
    for fname in paths['surf']:
        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
        # resolve symlinks before reading
        src = os.path.realpath(src)
        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
        pts, tri = read_surface(src)
        # scale vertex coordinates only; triangle indices are unchanged
        write_surface(dest, pts * scale, tri)
    # BEM files [in m]
    for bem_name in paths['bem']:
        scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
    # fiducials [in m]
    for fname in paths['fid']:
        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
        src = os.path.realpath(src)
        pts, cframe = read_fiducials(src)
        for pt in pts:
            pt['r'] = pt['r'] * scale
        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
        write_fiducials(dest, pts, cframe)
    # duplicate files (copied unmodified, not scaled)
    for fname in paths['duplicate']:
        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
        shutil.copyfile(src, dest)
    # source spaces
    for fname in paths['src']:
        src_name = os.path.basename(fname)
        scale_source_space(subject_to, src_name, subject_from, scale,
                           subjects_dir)
    # labels [in m]
    scale_labels(subject_to, subject_from=subject_from, scale=scale,
                 subjects_dir=subjects_dir)
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
                       subjects_dir=None, n_jobs=1):
    """Scale a source space for an mri created with scale_mri().

    Parameters
    ----------
    subject_to : str
        Name of the scaled MRI subject (the destination mri subject).
    src_name : str
        Source space name. Can be a spacing parameter (e.g., ``'7'``,
        ``'ico4'``, ``'oct6'``) or a file name of a source space file relative
        to the bem directory; if the file name contains the subject name, it
        should be indicated as "{subject}" in ``src_name`` (e.g.,
        ``"{subject}-my_source_space-src.fif"``).
    subject_from : None | str
        The subject from which to read the source space. If None, subject_from
        is read from subject_to's config file.
    scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subjects_from is specified,
        otherwise it is read from subject_to's config file.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.
    n_jobs : int
        Number of jobs to run in parallel if recomputing distances (only
        applies if scale is an array of length 3, and will not use more cores
        than there are source spaces).
    """
    subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
                                                                subject_from,
                                                                scale,
                                                                subjects_dir)
    # find the source space file names
    if src_name.isdigit():
        spacing = src_name  # spacing in mm
        src_pattern = src_fname
    else:
        # raw string avoids the invalid "\d" escape sequence warning
        match = re.match(r"(oct|ico)-?(\d+)$", src_name)
        if match:
            spacing = '-'.join(match.groups())
            src_pattern = src_fname
        else:
            spacing = None
            src_pattern = os.path.join(bem_dirname, src_name)
    src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
                             spacing=spacing)
    dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
                             spacing=spacing)

    # prepare scaling parameters
    if n_params == 1:
        norm_scale = None
    elif n_params == 3:
        norm_scale = 1. / scale
    else:
        raise RuntimeError("Invalid n_params entry in MRI cfg file: %s"
                           % str(n_params))

    # read and scale the source space [in m]
    sss = read_source_spaces(src)
    logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
                subject_to)
    logger.info("Scale factor: %s", scale)
    add_dist = False
    for ss in sss:
        ss['subject_his_id'] = subject_to
        ss['rr'] *= scale
        # distances and patch info
        if norm_scale is None:
            # uniform scaling: distances scale linearly with the points
            if ss['dist'] is not None:
                ss['dist'] *= scale
                ss['nearest_dist'] *= scale
                ss['dist_limit'] *= scale
        else:
            # non-uniform scaling: transform normals by the inverse scale and
            # re-normalize; distances must be recomputed from scratch
            nn = ss['nn']
            nn *= norm_scale
            norm = np.sqrt(np.sum(nn ** 2, 1))
            nn /= norm[:, np.newaxis]
            if ss['dist'] is not None:
                add_dist = True

    if add_dist:
        logger.info("Recomputing distances, this might take a while")
        # np.asscalar() was removed in NumPy 1.23; use ndarray.item() instead
        dist_limit = np.asarray(sss[0]['dist_limit']).item()
        add_source_space_distances(sss, dist_limit, n_jobs)
    write_source_spaces(dst, sss)
| bsd-3-clause |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
    """ If rcParams['backend_fallback'] is true, check to see if the
    current backend is compatible with the current running event
    loop, and if not switches to a compatible one.
    """
    backend = rcParams['backend']
    # Only act when fallback is enabled and an interactive backend is set.
    if not rcParams['backend_fallback'] or \
            backend not in _interactive_bk:
        return
    is_agg_backend = rcParams['backend'].endswith('Agg')
    # Probe each GUI toolkit only if its module was already imported by the
    # user; a running main loop means we must match that toolkit's backend.
    if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
        import wx
        if wx.App.IsMainLoopRunning():
            rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
    elif 'qt' in sys.modules and not backend == 'QtAgg':
        import qt
        if not qt.qApp.startingUp():
            # The mainloop is running.
            rcParams['backend'] = 'qtAgg'
    elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
        import PyQt4.QtGui
        if not PyQt4.QtGui.qApp.startingUp():
            # The mainloop is running.
            rcParams['backend'] = 'qt4Agg'
    elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
                                                  'GTKCairo'):
        import gobject
        if gobject.MainLoop().is_running():
            rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
    elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
        #import Tkinter
        pass #what if anything do we need to do for tkinter?
# Pick a backend compatible with any GUI event loop that is already running.
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
# Backend-provided entry points; switch_backend() rebinds these at runtime.
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
    # Default to the current figure when no artist is given.
    target = gcf() if o is None else o
    return target.findobj(match)
# Share the Artist.findobj docstring with this module-level wrapper.
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
    """
    Switch the default backend to newbackend.  This feature is
    **experimental**, and is only expected to work switching to an
    image backend.  Eg, if you have a bunch of PostScript scripts that
    you want to run from an interactive ipython session, you may want
    to switch to the PS backend before running them to avoid having a
    bunch of GUI windows popup.  If you try to interactively switch
    from one GUI backend to another, you will explode.

    Calling this command will close all open windows.
    """
    # Existing figures belong to the old backend, so close them first.
    close('all')
    global new_figure_manager, draw_if_interactive, show
    matplotlib.use(newbackend, warn=False)
    # Re-import the backends package so pylab_setup() sees the new backend
    # (``reload`` is the Python 2 builtin).
    reload(matplotlib.backends)
    from matplotlib.backends import pylab_setup
    # Rebind the module-level backend entry points.
    new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
    """
    Return the interactive status: True if matplotlib is in interactive
    mode, False otherwise.
    """
    return matplotlib.is_interactive()
def ioff():
    'Turn interactive mode off.'
    # Delegates to the module-level matplotlib.interactive() switch.
    matplotlib.interactive(False)
def ion():
    'Turn interactive mode on.'
    # Delegates to the module-level matplotlib.interactive() switch.
    matplotlib.interactive(True)
def rc(*args, **kwargs):
    # Thin pass-through to matplotlib.rc; the docstring is copied below.
    matplotlib.rc(*args, **kwargs)
# Copy the (dedented) docstring so help(pyplot.rc) matches matplotlib.rc.
if matplotlib.rc.__doc__ is not None:
    rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
    # Restore rc params to defaults, then redraw if in interactive mode.
    matplotlib.rcdefaults()
    draw_if_interactive()
# Copy the (dedented) docstring from the wrapped matplotlib function.
if matplotlib.rcdefaults.__doc__ is not None:
    rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
    """
    Get the current :class:`~matplotlib.cm.ScalarMappable` instance
    (image or patch collection), or *None* if no images or patch
    collections have been defined. The commands
    :func:`~matplotlib.pyplot.imshow` and
    :func:`~matplotlib.pyplot.figimage` create
    :class:`~matplotlib.image.Image` instances, and the commands
    :func:`~matplotlib.pyplot.pcolor` and
    :func:`~matplotlib.pyplot.scatter` create
    :class:`~matplotlib.collections.Collection` instances.
    """
    return gci._current
# Function attribute used as module-level mutable state; updated by sci().
gci._current = None
def sci(im):
    """
    Set the current image (target of colormap commands like
    :func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
    :func:`~matplotlib.pyplot.clim`).
    """
    # Stored as an attribute of gci() so both functions share the state.
    gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
    # Delegate to the Artist setp, then redraw when in interactive mode.
    ret = _setp(*args, **kwargs)
    draw_if_interactive()
    return ret
# Copy the wrapped function's docstring when it is available.
if _setp.__doc__ is not None:
    setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None,  # autoincrement if None, else integer from 1-N
           figsize=None,  # defaults to rc figure.figsize
           dpi=None,  # defaults to rc figure.dpi
           facecolor=None,  # defaults to rc figure.facecolor
           edgecolor=None,  # defaults to rc figure.edgecolor
           frameon=True,
           FigureClass=Figure,
           **kwargs
           ):
    """
    call signature::

      figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')

    Create a new figure and return a :class:`matplotlib.figure.Figure`
    instance.  If *num* = *None*, the figure number will be incremented and
    a new figure will be created.  The returned figure objects have a
    *number* attribute holding this number.

    If *num* is an integer, and ``figure(num)`` already exists, make it
    active and return the handle to it.  If ``figure(num)`` does not exist
    it will be created.  Numbering starts at 1, matlab style::

      figure(1)

    If you are creating many figures, make sure you explicitly call "close"
    on the figures you are not using, because this will enable pylab
    to properly clean up the memory.

    Optional keyword arguments:

      =========   =======================================================
      Keyword     Description
      =========   =======================================================
      figsize     width x height in inches; defaults to rc figure.figsize
      dpi         resolution; defaults to rc figure.dpi
      facecolor   the background color; defaults to rc figure.facecolor
      edgecolor   the border color; defaults to rc figure.edgecolor
      =========   =======================================================

    rcParams defines the default values, which can be modified in the
    matplotlibrc file

    *FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
    class that will be passed on to :meth:`new_figure_manager` in the
    backends which allows you to hook custom Figure classes into the
    pylab interface.  Additional kwargs will be passed on to your
    figure init function.
    """
    # fill in any unspecified appearance options from the rc defaults
    if figsize is None:
        figsize = rcParams['figure.figsize']
    if dpi is None:
        dpi = rcParams['figure.dpi']
    if facecolor is None:
        facecolor = rcParams['figure.facecolor']
    if edgecolor is None:
        edgecolor = rcParams['figure.edgecolor']

    if num is None:
        # auto-number: one past the highest existing figure number
        allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
        if allnums:
            num = max(allnums) + 1
        else:
            num = 1
    else:
        num = int(num)  # crude validation of num argument

    figManager = _pylab_helpers.Gcf.get_fig_manager(num)
    if figManager is None:
        # no figure with this number yet -- create one via the backend
        # (the PS backend renders at a fixed 72 dpi)
        if get_backend().lower() == 'ps':
            dpi = 72

        figManager = new_figure_manager(num, figsize=figsize,
                                        dpi=dpi,
                                        facecolor=facecolor,
                                        edgecolor=edgecolor,
                                        frameon=frameon,
                                        FigureClass=FigureClass,
                                        **kwargs)

        # make this figure current on button press event; the connection
        # id is stashed on the manager so close() can disconnect it
        def make_active(event):
            _pylab_helpers.Gcf.set_active(figManager)

        cid = figManager.canvas.mpl_connect('button_press_event', make_active)
        figManager._cidgcf = cid

    _pylab_helpers.Gcf.set_active(figManager)
    figManager.canvas.figure.number = num

    draw_if_interactive()
    return figManager.canvas.figure
def gcf():
    "Return a handle to the current figure."
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        # no active figure yet -- create and return a fresh one
        return figure()
    return manager.canvas.figure
def get_current_fig_manager():
    # return the manager of the active figure, creating a figure first
    # if none exists yet
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        gcf()  # creates an active figure as a side effect
        manager = _pylab_helpers.Gcf.get_active()
    return manager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
    # register *func* as a callback for event *s* on the current canvas
    canvas = get_current_fig_manager().canvas
    return canvas.mpl_connect(s, func)
# borrow the canvas docstring when available (py2exe -OO strips them)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
    connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
    # remove the callback registered under connection id *cid*
    canvas = get_current_fig_manager().canvas
    return canvas.mpl_disconnect(cid)
# borrow the canvas docstring when available (py2exe -OO strips them)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
    disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
    """
    Close a figure window

    ``close()`` by itself closes the current figure

    ``close(num)`` closes figure number *num*

    ``close(h)`` where *h* is a :class:`Figure` instance, closes that figure

    ``close('all')`` closes all the figure windows

    Raises TypeError for any other argument, or for more than one argument.
    """
    def _destroy(manager):
        # disconnect the make-active button-press callback before tearing
        # down the manager, mirroring what every close path must do
        manager.canvas.mpl_disconnect(manager._cidgcf)
        _pylab_helpers.Gcf.destroy(manager.num)

    if len(args) == 0:
        figManager = _pylab_helpers.Gcf.get_active()
        if figManager is None:
            return
        _destroy(figManager)
    elif len(args) == 1:
        arg = args[0]
        if arg == 'all':
            for manager in _pylab_helpers.Gcf.get_all_fig_managers():
                _destroy(manager)
        elif isinstance(arg, int):
            # bug fix: previously this branch destroyed the manager without
            # disconnecting its _cidgcf callback, unlike all other branches
            manager = _pylab_helpers.Gcf.get_fig_manager(arg)
            if manager is not None:
                _destroy(manager)
            else:
                _pylab_helpers.Gcf.destroy(arg)
        elif isinstance(arg, Figure):
            for manager in _pylab_helpers.Gcf.get_all_fig_managers():
                if manager.canvas.figure == arg:
                    _destroy(manager)
        else:
            raise TypeError('Unrecognized argument type %s to close' % type(arg))
    else:
        raise TypeError('close takes 0 or 1 arguments')
def clf():
    """Clear the current figure."""
    fig = gcf()
    fig.clf()
    draw_if_interactive()
def draw():
    """Redraw the current figure."""
    canvas = get_current_fig_manager().canvas
    canvas.draw()
def savefig(*args, **kwargs):
    # delegate straight to Figure.savefig on the current figure
    return gcf().savefig(*args, **kwargs)
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.savefig.__doc__ is not None:
    savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
    """
    Blocking call to interact with the figure.

    Waits for *n* clicks from the user and returns a list of the
    coordinates of each click.  A negative *timeout* disables the
    timeout.
    """
    return gcf().ginput(*args, **kwargs)
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.ginput.__doc__ is not None:
    ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
    """
    Blocking call to interact with the figure.

    Waits for *n* key or mouse clicks from the user and returns a list
    containing True for each keyboard click and False for each mouse
    click.  A negative *timeout* disables the timeout.
    """
    return gcf().waitforbuttonpress(*args, **kwargs)
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.waitforbuttonpress.__doc__ is not None:
    waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
    # add text in figure coordinates, redrawing in interactive mode
    text_obj = gcf().text(*args, **kwargs)
    draw_if_interactive()
    return text_obj
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.text.__doc__ is not None:
    figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
    # set a centered title on the current figure, redrawing if interactive
    title_obj = gcf().suptitle(*args, **kwargs)
    draw_if_interactive()
    return title_obj
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.suptitle.__doc__ is not None:
    suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
    # add an unresampled image to the current figure and make it the
    # current image (the gci() target); redraw if interactive
    image = gcf().figimage(*args, **kwargs)
    draw_if_interactive()
    gci._current = image
    return image
# borrow the Figure docstring when available (py2exe -OO strips them)
if Figure.figimage.__doc__ is not None:
    figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
    """
    Place a legend in the figure.

    *handles* is a sequence of :class:`~matplotlib.lines.Line2D` or
    :class:`~matplotlib.patches.Patch` instances; *labels* is a matching
    sequence of strings; *loc* is a string or integer specifying the
    legend location.

    A :class:`matplotlib.legend.Legend` instance is returned.

    Example::

      figlegend( (line1, line2, line3),
                 ('label1', 'label2', 'label3'),
                 'upper right' )

    .. seealso::
       :func:`~matplotlib.pyplot.legend`:
         For information about the location codes
    """
    legend = gcf().legend(handles, labels, loc, **kwargs)
    draw_if_interactive()
    return legend
## Figure and Axes hybrid ##
def hold(b=None):
    """
    Set the hold state.  If *b* is None (default), toggle the hold
    state, else set it to the boolean value *b*::

      hold()      # toggle hold
      hold(True)  # hold is on
      hold(False) # hold is off

    When *hold* is *True*, subsequent plot commands are added to the
    current axes.  When *hold* is *False*, the current axes and figure
    are cleared on the next plot command.
    """
    fig = gcf()
    current_axes = fig.gca()

    fig.hold(b)
    current_axes.hold(b)

    # b=None toggles the hold state, so read back the current state;
    # should pyplot hold toggle the rc setting - me thinks not
    rc('axes', hold=current_axes.ishold())
def ishold():
    """Return the hold status of the current axes."""
    return gca().ishold()
def over(func, *args, **kwargs):
    """
    over calls::

      func(*args, **kwargs)

    with ``hold(True)`` and then restores the hold state.
    """
    h = ishold()
    hold(True)
    try:
        func(*args, **kwargs)
    finally:
        # bug fix: the original left hold(True) active if func raised;
        # always restore the caller's hold state
        hold(h)
## Axes ##
def axes(*args, **kwargs):
    """
    Add an axes at position rect specified by:

    - ``axes()`` by itself creates a default full ``subplot(111)`` window axis.

    - ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
      height] in normalized (0, 1) units.  *axisbg* is the background
      color for the axis, default white.

    - ``axes(h)`` where *h* is an axes instance makes *h* the current
      axis.  An :class:`~matplotlib.axes.Axes` instance is returned.

    =======   ============   ================================================
    kwarg     Accepts        Description
    =======   ============   ================================================
    axisbg    color          the axes background color
    frameon   [True|False]   display the frame?
    sharex    otherax        current axes shares xaxis attribute with otherax
    sharey    otherax        current axes shares yaxis attribute with otherax
    polar     [True|False]   use a polar axes?
    =======   ============   ================================================

    Examples:

    * :file:`examples/pylab_examples/axes_demo.py` places custom axes.
    * :file:`examples/pylab_examples/shared_axis_demo.py` uses
      *sharex* and *sharey*.

    Raises TypeError when more than one positional argument is given.
    """
    # fixes: docstring typo "Desctiption"; use nargs consistently instead
    # of mixing nargs and len(args)
    nargs = len(args)
    if nargs == 0:
        return subplot(111, **kwargs)
    if nargs > 1:
        raise TypeError('Only one non keyword arg to axes allowed')
    arg = args[0]

    if isinstance(arg, Axes):
        # an existing axes: just make it current
        a = gcf().sca(arg)
    else:
        # a rect [left, bottom, width, height]: add a new axes
        a = gcf().add_axes(arg, **kwargs)
    draw_if_interactive()
    return a
def delaxes(*args):
    """
    ``delaxes(ax)``: remove *ax* from the current figure.  If *ax*
    doesn't exist, an error will be raised.

    ``delaxes()``: delete the current axes
    """
    target = gca() if not args else args[0]
    result = gcf().delaxes(target)
    draw_if_interactive()
    return result
def gca(**kwargs):
    """
    Return the current axis instance.  This can be used to control
    axis properties either using set or the
    :class:`~matplotlib.axes.Axes` methods, for example, setting the
    xaxis range::

      plot(t,s)
      set(gca(), 'xlim', [0,10])

    or::

      plot(t,s)
      a = gca()
      a.set_xlim([0,10])
    """
    return gcf().gca(**kwargs)
# More ways of creating axes:
def subplot(*args, **kwargs):
    """
    Create a subplot command, creating axes with::

      subplot(numRows, numCols, plotNum)

    where *plotNum* = 1 is the first plot number and increasing *plotNums*
    fill rows first.  max(*plotNum*) == *numRows* * *numCols*

    You can leave out the commas if *numRows* <= *numCols* <=
    *plotNum* < 10, as in::

      subplot(211)    # 2 rows, 1 column, first (upper) plot

    ``subplot(111)`` is the default axis.

    New subplots that overlap old will delete the old axes.  If you do
    not want this behavior, use
    :meth:`matplotlib.figure.Figure.add_subplot` or the
    :func:`~matplotlib.pyplot.axes` command.  Eg.::

      from pylab import *
      plot([1,2,3])  # implicitly creates subplot(111)
      subplot(211)   # overlaps, subplot(111) is killed
      plot(rand(12), rand(12))
      subplot(212, axisbg='y') # creates 2nd subplot with yellow background

    Keyword arguments:

      *axisbg*:
        The background color of the subplot, which can be any valid
        color specifier.  See :mod:`matplotlib.colors` for more
        information.

      *polar*:
        A boolean flag indicating whether the subplot plot should be
        a polar projection.  Defaults to False.

      *projection*:
        A string giving the name of a custom projection to be used
        for the subplot.  This projection must have been previously
        registered.  See :func:`matplotlib.projections.register_projection`

    .. seealso::
        :func:`~matplotlib.pyplot.axes`:
            For additional information on :func:`axes` and
            :func:`subplot` keyword arguments.

        :file:`examples/pylab_examples/polar_scatter.py`

    **Example:**

    .. plot:: mpl_examples/pylab_examples/subplot_demo.py
    """
    fig = gcf()
    new_axes = fig.add_subplot(*args, **kwargs)

    # any pre-existing axes fully covered by the new subplot get removed
    doomed = [other for other in fig.axes
              if not (other == new_axes)
              and new_axes.bbox.fully_overlaps(other.bbox)]
    for victim in doomed:
        delaxes(victim)

    draw_if_interactive()
    return new_axes
def twinx(ax=None):
    """
    Make a second axes overlay *ax* (or the current axes if *ax* is
    *None*) sharing the xaxis.  The ticks of the returned axes are
    placed on the right, and that instance is returned.

    .. seealso::
       :file:`examples/api_examples/two_scales.py`
    """
    if ax is None:
        ax = gca()
    twin = ax.twinx()
    draw_if_interactive()
    return twin
def twiny(ax=None):
    """
    Make a second axes overlay *ax* (or the current axes if *ax* is
    *None*) sharing the yaxis.  The ticks of the returned axes are
    placed on the top, and that instance is returned.
    """
    if ax is None:
        ax = gca()
    twin = ax.twiny()
    draw_if_interactive()
    return twin
def subplots_adjust(*args, **kwargs):
    """
    call signature::

      subplots_adjust(left=None, bottom=None, right=None, top=None,
                      wspace=None, hspace=None)

    Tune the subplot layout via the
    :class:`matplotlib.figure.SubplotParams` mechanism.  The parameter
    meanings (and suggested defaults) are::

      left  = 0.125  # the left side of the subplots of the figure
      right = 0.9    # the right side of the subplots of the figure
      bottom = 0.1   # the bottom of the subplots of the figure
      top = 0.9      # the top of the subplots of the figure
      wspace = 0.2   # the amount of width reserved for blank space between subplots
      hspace = 0.2   # the amount of height reserved for white space between subplots

    The actual defaults are controlled by the rc file
    """
    gcf().subplots_adjust(*args, **kwargs)
    draw_if_interactive()
def subplot_tool(targetfig=None):
    """
    Launch a subplot tool window for *targetfig* (default gcf).

    A :class:`matplotlib.widgets.SubplotTool` instance is returned.
    """
    # remember the rc toolbar setting and disable it so the tool figure
    # is created without a navigation toolbar; restored below
    tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
    rcParams['toolbar'] = 'None'
    if targetfig is None:
        manager = get_current_fig_manager()
        targetfig = manager.canvas.figure
    else:
        # find the manager for this figure
        # NOTE(review): walks Gcf's private _activeQue; the for/else raises
        # when no manager owns *targetfig*
        for manager in _pylab_helpers.Gcf._activeQue:
            if manager.canvas.figure == targetfig:
                break
        else:
            raise RuntimeError('Could not find manager for targetfig')

    # the tool lives in its own small figure
    toolfig = figure(figsize=(6, 3))
    toolfig.subplots_adjust(top=0.9)
    ret = SubplotTool(targetfig, toolfig)
    rcParams['toolbar'] = tbar
    _pylab_helpers.Gcf.set_active(manager)  # restore the current figure
    return ret
def box(on=None):
    """
    Turn the axes box on or off according to *on*.

    If *on* is *None*, toggle state.
    """
    ax = gca()
    if on is None:
        # toggle relative to the current frame state
        on = not ax.get_frame_on()
    ax.set_frame_on(on)
    draw_if_interactive()
def title(s, *args, **kwargs):
    """
    Set the title of the current axis to *s*.

    Default font override is::

      override = {'fontsize': 'medium',
                  'verticalalignment': 'bottom',
                  'horizontalalignment': 'center'}

    .. seealso::
       :func:`~matplotlib.pyplot.text`:
         for information on how override and the optional args work.
    """
    text_obj = gca().set_title(s, *args, **kwargs)
    draw_if_interactive()
    return text_obj
## Axis ##
def axis(*v, **kwargs):
    """
    Set/Get the axis properties:

      >>> axis()

    returns the current axes limits ``[xmin, xmax, ymin, ymax]``.

      >>> axis(v)

    sets the min and max of the x and y axes, with
    ``v = [xmin, xmax, ymin, ymax]``.

      >>> axis('off')

    turns off the axis lines and labels.

      >>> axis('equal')

    changes limits of *x* or *y* axis so that equal increments of *x*
    and *y* have the same length; a circle is circular.

      >>> axis('scaled')

    achieves the same result by changing the dimensions of the plot box
    instead of the axis data limits.

      >>> axis('tight')

    changes *x* and *y* axis limits such that all data is shown.  If all
    data is already shown, it will move it to the center of the figure
    without modifying (*xmax* - *xmin*) or (*ymax* - *ymin*).  Note this
    is slightly different than in matlab.

      >>> axis('image')

    is 'scaled' with the axis limits equal to the data limits.

      >>> axis('auto')

    and

      >>> axis('normal')

    are deprecated.  They restore default behavior; axis limits are
    automatically scaled to make the data fit comfortably within the
    plot box.

    if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
    as kwargs selectively to alter just those limits without changing
    the others.

    The xmin, xmax, ymin, ymax tuple is returned

    .. seealso::
        :func:`xlim`, :func:`ylim`
    """
    current_axes = gca()
    limits = current_axes.axis(*v, **kwargs)
    draw_if_interactive()
    return limits
def xlabel(s, *args, **kwargs):
    """
    Set the *x* axis label of the current axis to *s*

    Default override is::

      override = {
          'fontsize'            : 'small',
          'verticalalignment'   : 'top',
          'horizontalalignment' : 'center'
          }

    .. seealso::
       :func:`~matplotlib.pyplot.text`:
         For information on how override and the optional args work
    """
    label = gca().set_xlabel(s, *args, **kwargs)
    draw_if_interactive()
    return label
def ylabel(s, *args, **kwargs):
    """
    Set the *y* axis label of the current axis to *s*.

    Defaults override is::

      override = {
          'fontsize'            : 'small',
          'verticalalignment'   : 'center',
          'horizontalalignment' : 'right',
          'rotation'            : 'vertical'
          }

    .. seealso::
       :func:`~matplotlib.pyplot.text`:
         For information on how override and the optional args work.
    """
    label = gca().set_ylabel(s, *args, **kwargs)
    draw_if_interactive()
    return label
def xlim(*args, **kwargs):
    """
    Set/Get the xlimits of the current axes::

      xmin, xmax = xlim()   # return the current xlim
      xlim( (xmin, xmax) )  # set the xlim to xmin, xmax
      xlim( xmin, xmax )    # set the xlim to xmin, xmax

    If you do not specify args, you can pass the xmin and xmax as
    kwargs, eg.::

      xlim(xmax=3) # adjust the max leaving min unchanged
      xlim(xmin=1) # adjust the min leaving max unchanged

    The new axis limits are returned as a length 2 tuple.
    """
    limits = gca().set_xlim(*args, **kwargs)
    draw_if_interactive()
    return limits
def ylim(*args, **kwargs):
    """
    Set/Get the ylimits of the current axes::

      ymin, ymax = ylim()   # return the current ylim
      ylim( (ymin, ymax) )  # set the ylim to ymin, ymax
      ylim( ymin, ymax )    # set the ylim to ymin, ymax

    If you do not specify args, you can pass the *ymin* and *ymax* as
    kwargs, eg.::

      ylim(ymax=3) # adjust the max leaving min unchanged
      ylim(ymin=1) # adjust the min leaving max unchanged

    The new axis limits are returned as a length 2 tuple.
    """
    limits = gca().set_ylim(*args, **kwargs)
    draw_if_interactive()
    return limits
def xscale(*args, **kwargs):
    """
    call signature::

      xscale(scale, **kwargs)

    Set the scaling for the x-axis: %(scale)s

    Different keywords may be accepted, depending on the scale:

    %(scale_docs)s
    """
    current_axes = gca()
    result = current_axes.set_xscale(*args, **kwargs)
    draw_if_interactive()
    return result
# substitute the registered scale names/docs into the docstring
xscale.__doc__ = dedent(xscale.__doc__) % {
    'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
    'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
    """
    call signature::

      yscale(scale, **kwargs)

    Set the scaling for the y-axis: %(scale)s

    Different keywords may be accepted, depending on the scale:

    %(scale_docs)s
    """
    # doc fix: the call signature previously read "xscale(...)" --
    # a copy-paste error from the xscale wrapper above
    ax = gca()
    ret = ax.set_yscale(*args, **kwargs)
    draw_if_interactive()
    return ret
# substitute the registered scale names/docs into the docstring
yscale.__doc__ = dedent(yscale.__doc__) % {
    'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
    'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
    """
    Set/Get the xlimits of the current ticklocs and labels::

      # return locs, labels where locs is an array of tick locations and
      # labels is an array of tick labels.
      locs, labels = xticks()

      # set the locations of the xticks
      xticks( arange(6) )

      # set the locations and labels of the xticks
      xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )

    The keyword args, if any, are :class:`~matplotlib.text.Text`
    properties.
    """
    ax = gca()
    nargs = len(args)

    if nargs == 0:
        locs = ax.get_xticks()
        labels = ax.get_xticklabels()
    elif nargs == 1:
        locs = ax.set_xticks(args[0])
        labels = ax.get_xticklabels()
    elif nargs == 2:
        locs = ax.set_xticks(args[0])
        labels = ax.set_xticklabels(args[1], **kwargs)
    else:
        raise TypeError('Illegal number of arguments to xticks')

    # apply any Text properties to every label
    if kwargs:
        for label in labels:
            label.update(kwargs)

    draw_if_interactive()
    return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
    """
    Set/Get the ylimits of the current ticklocs and labels::

      # return locs, labels where locs is an array of tick locations and
      # labels is an array of tick labels.
      locs, labels = yticks()

      # set the locations of the yticks
      yticks( arange(6) )

      # set the locations and labels of the yticks
      yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )

    The keyword args, if any, are :class:`~matplotlib.text.Text`
    properties.
    """
    ax = gca()
    nargs = len(args)

    if nargs == 0:
        locs = ax.get_yticks()
        labels = ax.get_yticklabels()
    elif nargs == 1:
        locs = ax.set_yticks(args[0])
        labels = ax.get_yticklabels()
    elif nargs == 2:
        locs = ax.set_yticks(args[0])
        labels = ax.set_yticklabels(args[1], **kwargs)
    else:
        raise TypeError('Illegal number of arguments to yticks')

    # apply any Text properties to every label
    if kwargs:
        for label in labels:
            label.update(kwargs)

    draw_if_interactive()
    return locs, silent_list('Text yticklabel', labels)
def rgrids(*args, **kwargs):
    """
    Set/Get the radial locations of the gridlines and ticklabels on a
    polar plot.

    call signatures::

      lines, labels = rgrids()
      lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)

    When called with no arguments, :func:`rgrid` simply returns the
    tuple (*lines*, *labels*), where *lines* is an array of radial
    gridlines (:class:`~matplotlib.lines.Line2D` instances) and
    *labels* is an array of tick labels
    (:class:`~matplotlib.text.Text` instances).  When called with
    arguments, the labels will appear at the specified radial
    distances and angles.

    *labels*, if not *None*, is a len(*radii*) list of strings of the
    labels to use at each angle.

    If *labels* is None, the rformatter will be used

    Examples::

      # set the locations of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0) )

      # set the locations and labels of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry' )
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('rgrids only defined for polar axes')

    if args:
        lines, labels = ax.set_rgrids(*args, **kwargs)
    else:
        lines = ax.yaxis.get_ticklines()
        labels = ax.yaxis.get_ticklabels()

    draw_if_interactive()
    return (silent_list('Line2D rgridline', lines),
            silent_list('Text rgridlabel', labels))
def thetagrids(*args, **kwargs):
    """
    Set/Get the theta locations of the gridlines and ticklabels.

    If no arguments are passed, return a tuple (*lines*, *labels*)
    where *lines* is an array of radial gridlines
    (:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
    array of tick labels (:class:`~matplotlib.text.Text` instances)::

      lines, labels = thetagrids()

    Otherwise the syntax is::

      lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)

    set the angles at which to place the theta grids (these gridlines
    are equal along the theta dimension).

    *angles* is in degrees.

    *labels*, if not *None*, is a len(angles) list of strings of the
    labels to use at each angle.

    If *labels* is *None*, the labels will be ``fmt%angle``.

    *frac* is the fraction of the polar axes radius at which to place
    the label (1 is the edge).  Eg. 1.05 is outside the axes and 0.95
    is inside the axes.

    Return value is a list of tuples (*lines*, *labels*):

      - *lines* are :class:`~matplotlib.lines.Line2D` instances

      - *labels* are :class:`~matplotlib.text.Text` instances.

    Note that on input, the *labels* argument is a list of strings,
    and on output it is a list of :class:`~matplotlib.text.Text`
    instances.

    Examples::

      # set the locations of the radial gridlines and labels
      lines, labels = thetagrids( range(45,360,90) )

      # set the locations and labels of the radial gridlines and labels
      lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        # bug fix: the message previously said "rgrids", copy-pasted from
        # the rgrids wrapper above
        raise RuntimeError('thetagrids only defined for polar axes')
    if len(args) == 0:
        lines = ax.xaxis.get_ticklines()
        labels = ax.xaxis.get_ticklabels()
    else:
        lines, labels = ax.set_thetagrids(*args, **kwargs)
    draw_if_interactive()
    return (silent_list('Line2D thetagridline', lines),
            silent_list('Text thetagridlabel', labels)
            )
## Plotting Info ##
def plotting():
    """
    Plotting commands

    =============== =========================================================
    Command         Description
    =============== =========================================================
    axes            Create a new axes
    axis            Set or return the current axis limits
    bar             make a bar chart
    boxplot         make a box and whiskers chart
    cla             clear current axes
    clabel          label a contour plot
    clf             clear a figure window
    close           close a figure window
    colorbar        add a colorbar to the current figure
    cohere          make a plot of coherence
    contour         make a contour plot
    contourf        make a filled contour plot
    csd             make a plot of cross spectral density
    draw            force a redraw of the current figure
    errorbar        make an errorbar graph
    figlegend       add a legend to the figure
    figimage        add an image to the figure, w/o resampling
    figtext         add text in figure coords
    figure          create or change active figure
    fill            make filled polygons
    fill_between    make filled polygons
    gca             return the current axes
    gcf             return the current figure
    gci             get the current image, or None
    getp            get a handle graphics property
    hist            make a histogram
    hold            set the hold state on current axes
    legend          add a legend to the axes
    loglog          a log log plot
    imread          load image file into array
    imshow          plot image data
    matshow         display a matrix in a new figure preserving aspect
    pcolor          make a pseudocolor plot
    plot            make a line plot
    plotfile        plot data from a flat file
    psd             make a plot of power spectral density
    quiver          make a direction field (arrows) plot
    rc              control the default params
    savefig         save the current figure
    scatter         make a scatter plot
    setp            set a handle graphics property
    semilogx        log x axis
    semilogy        log y axis
    show            show the figures
    specgram        a spectrogram plot
    stem            make a stem plot
    subplot         make a subplot (numrows, numcols, axesnum)
    table           add a table to the axes
    text            add some text at location x,y to the current axes
    title           add a title to the current axes
    xlabel          add an xlabel to the current axes
    ylabel          add a ylabel to the current axes
    =============== =========================================================

    The following commands will set the default colormap accordingly:

    * autumn
    * bone
    * cool
    * copper
    * flag
    * gray
    * hot
    * hsv
    * jet
    * pink
    * prism
    * spring
    * summer
    * winter
    * spectral
    """
    # help-only function: exists solely for its docstring
    pass
def get_plot_commands():
    # the names of the pyplot commands exposed by the pylab interface
    return ('axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
            'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
            'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
            'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold',
            'imread', 'imshow', 'legend', 'loglog', 'quiver', 'rc',
            'pcolor', 'pcolormesh', 'plot', 'psd', 'savefig', 'scatter',
            'set', 'semilogx', 'semilogy', 'show', 'specgram', 'stem',
            'subplot', 'table', 'text', 'title', 'xlabel', 'ylabel',
            'pie', 'polar')
def colors():
    """
    This is a do nothing function to provide you with help on how
    matplotlib handles colors.

    Commands which take color arguments can use several formats to
    specify the colors.  For the basic builtin colors, you can use a
    single letter

      =====   =======
      Alias   Color
      =====   =======
      'b'     blue
      'g'     green
      'r'     red
      'c'     cyan
      'm'     magenta
      'y'     yellow
      'k'     black
      'w'     white
      =====   =======

    For a greater range of colors, you have two options.  You can
    specify the color using an html hex string, as in::

      color = '#eeefff'

    or you can pass an R,G,B tuple, where each of R,G,B are in the
    range [0,1].

    You can also use any legal html name for a color, for example::

      color = 'red',
      color = 'burlywood'
      color = 'chartreuse'

    The example below creates a subplot with a dark
    slate gray background::

       subplot(111, axisbg=(0.1843, 0.3098, 0.3098))

    Here is an example that creates a pale turquoise title::

      title('Is this the best color?', color='#afeeee')
    """
    # help-only function: exists solely for its docstring
    pass
def colormaps():
    """
    matplotlib provides the following colormaps.

    * autumn
    * bone
    * cool
    * copper
    * flag
    * gray
    * hot
    * hsv
    * jet
    * pink
    * prism
    * spring
    * summer
    * winter
    * spectral

    You can set the colormap for an image, pcolor, scatter, etc,
    either as a keyword argument::

      imshow(X, cmap=cm.hot)

    or post-hoc using the corresponding pylab interface function::

      imshow(X)
      hot()
      jet()

    In interactive mode, this will update the colormap allowing you to
    see which one works best for your data.
    """
    # help-only function: exists solely for its docstring
    pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
    # default to the current image and the current axes when the caller
    # does not supply them explicitly
    if mappable is None:
        mappable = gci()
    if ax is None:
        ax = gca()
    cbar = gcf().colorbar(mappable, cax=cax, ax=ax, **kw)
    draw_if_interactive()
    return cbar
# the full documentation lives in matplotlib.colorbar
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
    """
    Set the color limits of the current image

    To apply clim to all axes images do::

      clim(0, 0.5)

    If either *vmin* or *vmax* is None, the image min/max respectively
    will be used for color scaling.

    If you want to set the clim of multiple images,
    use, for example::

      for im in gca().get_images():
          im.set_clim(0, 0.05)
    """
    image = gci()
    if image is None:
        raise RuntimeError('You must first define an image, eg with imshow')
    image.set_clim(vmin, vmax)
    draw_if_interactive()
def imread(*args, **kwargs):
    # delegate to matplotlib's private _imread implementation
    return _imread(*args, **kwargs)
# borrow the underlying docstring when available (py2exe -OO strips them)
if _imread.__doc__ is not None:
    imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
    """
    Display an array as a matrix in a new figure window.

    The origin is set at the upper left hand corner and rows (first
    dimension of the array) are displayed horizontally.  The aspect
    ratio of the figure window is that of the array, unless this would
    make an excessively short or narrow figure.

    Tick labels for the xaxis are placed on top.

    With the exception of fignum, keyword arguments are passed to
    :func:`~matplotlib.pyplot.imshow`.

    *fignum*: [ None | integer | False ]
      By default, :func:`matshow` creates a new figure window with
      automatic numbering.  If *fignum* is given as an integer, the
      created figure will use this figure number.  Because of how
      :func:`matshow` tries to set the figure aspect ratio to be the
      one of the array, if you provide the number of an already
      existing figure, strange things may happen.

      If *fignum* is *False* or 0, a new figure window will **NOT** be
      created.
    """
    # bug fix: 'fignum is 0' tested object identity and only worked
    # because CPython caches small ints; compare by value instead
    # (None == 0 is False, so auto-numbering is unaffected)
    if fignum is False or fignum == 0:
        ax = gca()
    else:
        # extract the aspect ratio of the array and size the figure to match
        fig = figure(fignum, figsize=figaspect(A))
        ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
    im = ax.matshow(A, **kw)
    gci._current = im
    draw_if_interactive()
    return im
def polar(*args, **kwargs):
    """
    call signature::

      polar(theta, r, **kwargs)

    Make a polar plot.  Multiple *theta*, *r* arguments are supported,
    with format strings, as in :func:`~matplotlib.pyplot.plot`.
    """
    polar_axes = gca(polar=True)
    lines = polar_axes.plot(*args, **kwargs)
    draw_if_interactive()
    return lines
def plotfile(fname, cols=(0,), plotfuncs=None,
             comments='#', skiprows=0, checkrows=5, delimiter=',',
             **kwargs):
    """
    Plot the data in *fname*

    *cols* is a sequence of column identifiers to plot.  An identifier
    is either an int or a string.  If it is an int, it indicates the
    column number.  If it is a string, it indicates the column header.
    matplotlib will make column headers lower case, replace spaces with
    underscores, and remove all illegal characters; so ``'Adj Close*'``
    will have name ``'adj_close'``.

    - If len(*cols*) == 1, only that column will be plotted on the *y* axis.

    - If len(*cols*) > 1, the first element will be an identifier for
      data for the *x* axis and the remaining elements will be the
      column indexes for multiple subplots

    *plotfuncs*, if not *None*, is a dictionary mapping identifier to
    an :class:`~matplotlib.axes.Axes` plotting function as a string.
    Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
    etc.  You must use the same type of identifier in the *cols*
    vector as you use in the *plotfuncs* dictionary, eg., integer
    column numbers in both or column names in both.

    *comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
    :func:`matplotlib.pylab.csv2rec` to load the data into a record array.

    kwargs are passed on to plotting functions.

    Example usage::

      # plot the 2nd and 4th column against the 1st in two subplots
      plotfile(fname, (0,1,3))

      # plot using column names; specify an alternate plot type for volume
      plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
    """
    fig = figure()
    if len(cols) < 1:
        raise ValueError('must have at least one column of data')

    if plotfuncs is None:
        plotfuncs = dict()
    # load the file into a record array; column headers become
    # lower-cased, underscored field names (see docstring)
    r = mlab.csv2rec(fname, comments=comments,
                     skiprows=skiprows, checkrows=checkrows,
                     delimiter=delimiter)

    def getname_val(identifier):
        'return the name and column data for identifier'
        if is_string_like(identifier):
            return identifier, r[identifier]
        elif is_numlike(identifier):
            # integer identifiers index into the record array's fields
            name = r.dtype.names[int(identifier)]
            return name, r[name]
        else:
            raise TypeError('identifier must be a string or integer')

    xname, x = getname_val(cols[0])

    if len(cols) == 1:
        # a single column is plotted against its own index
        ax1 = fig.add_subplot(1, 1, 1)
        funcname = plotfuncs.get(cols[0], 'plot')
        func = getattr(ax1, funcname)
        func(x, **kwargs)
        ax1.set_xlabel(xname)
    else:
        # the first column supplies x; each remaining column gets its own
        # subplot sharing that x axis
        N = len(cols)
        for i in range(1, N):
            if i == 1:
                ax = ax1 = fig.add_subplot(N - 1, 1, i)
                ax.grid(True)
            else:
                ax = fig.add_subplot(N - 1, 1, i, sharex=ax1)
                ax.grid(True)

            yname, y = getname_val(cols[i])

            funcname = plotfuncs.get(cols[i], 'plot')
            func = getattr(ax, funcname)

            func(x, y, **kwargs)
            ax.set_ylabel(yname)
            # only the bottom subplot keeps its x label
            if ax.is_last_row():
                ax.set_xlabel(xname)
            else:
                ax.set_xlabel('')

    # date columns get auto-rotated/formatted tick labels
    if xname == 'date':
        fig.autofmt_xdate()

    draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
    # pyplot wrapper for Axes.acorr; 'hold' kwarg temporarily overrides
    # the current axes' hold state for just this call.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()  # remember caller's hold state
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().acorr(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)  # restore hold state even on failure, then re-raise
        raise
    hold(b)  # restore caller's hold state
    return ret
if Axes.acorr.__doc__ is not None:
    acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
    # pyplot wrapper for Axes.arrow; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().arrow(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.arrow.__doc__ is not None:
    arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
    # pyplot wrapper for Axes.axhline; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().axhline(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.axhline.__doc__ is not None:
    axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
    # pyplot wrapper for Axes.axhspan; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().axhspan(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.axhspan.__doc__ is not None:
    axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
    # pyplot wrapper for Axes.axvline; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().axvline(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.axvline.__doc__ is not None:
    axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
    # pyplot wrapper for Axes.axvspan; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().axvspan(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.axvspan.__doc__ is not None:
    axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
    # pyplot wrapper for Axes.bar; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().bar(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.bar.__doc__ is not None:
    bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
    # pyplot wrapper for Axes.barh; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().barh(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.barh.__doc__ is not None:
    barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
    # pyplot wrapper for Axes.broken_barh; 'hold' kwarg temporarily
    # overrides the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().broken_barh(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.broken_barh.__doc__ is not None:
    broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
    # pyplot wrapper for Axes.boxplot; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().boxplot(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.boxplot.__doc__ is not None:
    boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
    # pyplot wrapper for Axes.cohere; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().cohere(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.cohere.__doc__ is not None:
    cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
    # pyplot wrapper for Axes.clabel; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().clabel(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.clabel.__doc__ is not None:
    clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
    # pyplot wrapper for Axes.contour; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().contour(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    # register as "current image" only when the ContourSet carries scalar
    # data (_A), so colorbar() etc. can pick it up
    if ret._A is not None: gci._current = ret
    hold(b)
    return ret
if Axes.contour.__doc__ is not None:
    contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
    # pyplot wrapper for Axes.contourf; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().contourf(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    # register as "current image" only when the ContourSet carries scalar
    # data (_A), so colorbar() etc. can pick it up
    if ret._A is not None: gci._current = ret
    hold(b)
    return ret
if Axes.contourf.__doc__ is not None:
    contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
    # pyplot wrapper for Axes.csd; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().csd(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.csd.__doc__ is not None:
    csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
    # pyplot wrapper for Axes.errorbar; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().errorbar(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.errorbar.__doc__ is not None:
    errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
    # pyplot wrapper for Axes.fill; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().fill(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.fill.__doc__ is not None:
    fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
    # pyplot wrapper for Axes.fill_between; 'hold' kwarg temporarily
    # overrides the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().fill_between(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.fill_between.__doc__ is not None:
    fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
    # pyplot wrapper for Axes.hexbin; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().hexbin(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.hexbin.__doc__ is not None:
    hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
    # pyplot wrapper for Axes.hist; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().hist(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.hist.__doc__ is not None:
    hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
    # pyplot wrapper for Axes.hlines; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().hlines(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.hlines.__doc__ is not None:
    hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
    # pyplot wrapper for Axes.imshow; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().imshow(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.imshow.__doc__ is not None:
    imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
    # pyplot wrapper for Axes.loglog; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().loglog(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.loglog.__doc__ is not None:
    loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
    # pyplot wrapper for Axes.pcolor; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().pcolor(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.pcolor.__doc__ is not None:
    pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
    # pyplot wrapper for Axes.pcolormesh; 'hold' kwarg temporarily
    # overrides the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().pcolormesh(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.pcolormesh.__doc__ is not None:
    pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
    # pyplot wrapper for Axes.pie; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().pie(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.pie.__doc__ is not None:
    pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
    # pyplot wrapper for Axes.plot; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().plot(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.plot.__doc__ is not None:
    plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
    # pyplot wrapper for Axes.plot_date; 'hold' kwarg temporarily
    # overrides the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().plot_date(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.plot_date.__doc__ is not None:
    plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
    # pyplot wrapper for Axes.psd; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().psd(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.psd.__doc__ is not None:
    psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
    # pyplot wrapper for Axes.quiver; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().quiver(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.quiver.__doc__ is not None:
    quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
    # pyplot wrapper for Axes.quiverkey; 'hold' kwarg temporarily
    # overrides the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().quiverkey(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.quiverkey.__doc__ is not None:
    quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
    # pyplot wrapper for Axes.scatter; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().scatter(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.scatter.__doc__ is not None:
    scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
    # pyplot wrapper for Axes.semilogx; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().semilogx(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.semilogx.__doc__ is not None:
    semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
    # pyplot wrapper for Axes.semilogy; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().semilogy(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.semilogy.__doc__ is not None:
    semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
    # pyplot wrapper for Axes.specgram; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().specgram(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    # ret[-1] is presumably the AxesImage element of specgram's return
    # tuple, registered as the "current image" — confirm against Axes.specgram
    gci._current = ret[-1]
    hold(b)
    return ret
if Axes.specgram.__doc__ is not None:
    specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
    # pyplot wrapper for Axes.spy; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().spy(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    gci._current = ret  # make result the "current image" for colorbar() etc.
    hold(b)
    return ret
if Axes.spy.__doc__ is not None:
    spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
    # pyplot wrapper for Axes.stem; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().stem(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.stem.__doc__ is not None:
    stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
    # pyplot wrapper for Axes.step; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().step(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.step.__doc__ is not None:
    step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
    # pyplot wrapper for Axes.vlines; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().vlines(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.vlines.__doc__ is not None:
    vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
    # pyplot wrapper for Axes.xcorr; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().xcorr(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.xcorr.__doc__ is not None:
    xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
    # pyplot wrapper for Axes.barbs; 'hold' kwarg temporarily overrides
    # the current axes' hold state.
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().barbs(*args, **kwargs)
        draw_if_interactive()
    except:
        hold(b)
        raise
    hold(b)
    return ret
if Axes.barbs.__doc__ is not None:
    barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
    # pyplot wrapper: Axes.cla on the current axes (no hold handling).
    ret = gca().cla(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.cla.__doc__ is not None:
    cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
    # pyplot wrapper: Axes.grid on the current axes (no hold handling).
    ret = gca().grid(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.grid.__doc__ is not None:
    grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
    # pyplot wrapper: Axes.legend on the current axes (no hold handling).
    ret = gca().legend(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.legend.__doc__ is not None:
    legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
    # pyplot wrapper: Axes.table on the current axes (no hold handling).
    ret = gca().table(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.table.__doc__ is not None:
    table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
    # pyplot wrapper: Axes.text on the current axes (no hold handling).
    ret = gca().text(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.text.__doc__ is not None:
    text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
    # pyplot wrapper: Axes.annotate on the current axes (no hold handling).
    ret = gca().annotate(*args, **kwargs)
    draw_if_interactive()
    return ret
if Axes.annotate.__doc__ is not None:
    annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
    '''
    set the default colormap to autumn and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='autumn')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.autumn)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
    '''
    set the default colormap to bone and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='bone')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.bone)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
    '''
    set the default colormap to cool and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='cool')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.cool)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
    '''
    set the default colormap to copper and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='copper')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.copper)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
    '''
    set the default colormap to flag and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='flag')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.flag)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
    '''
    set the default colormap to gray and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='gray')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.gray)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
    '''
    set the default colormap to hot and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='hot')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.hot)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
    '''
    set the default colormap to hsv and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='hsv')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.hsv)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
    '''
    set the default colormap to jet and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='jet')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.jet)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
    '''
    set the default colormap to pink and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='pink')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.pink)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
    '''
    set the default colormap to prism and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='prism')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.prism)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
    '''
    set the default colormap to spring and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='spring')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.spring)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
    '''
    set the default colormap to summer and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='summer')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.summer)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
    '''
    set the default colormap to winter and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='winter')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.winter)
    draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
    '''
    set the default colormap to spectral and apply to current image if any.
    See help(colormaps) for more information
    '''
    rc('image', cmap='spectral')  # default for images created from now on
    im = gci()
    if im is not None:  # also recolor the current image retroactively
        im.set_cmap(cm.spectral)
    draw_if_interactive()
| agpl-3.0 |
kklmn/xrt | xrt/plotter.py | 1 | 82259 | # -*- coding: utf-8 -*-
u"""
Module :mod:`plotter` provides classes describing axes and plots, as well as
containers for the accumulated arrays (histograms) for subsequent
pickling/unpickling or for global flux normalization. The module defines
several constants for default plot positions and sizes. The user may want to
modify them in the module or externally as in the xrt_logo.py example.
.. note::
Each plot has a 2D positional histogram, two 1D positional histograms and,
typically, a 1D color histogram (e.g. energy).
.. warning::
The two 1D positional histograms are not calculated from the 2D one!
In other words, the 1D histograms only respect their corresponding limits
and not the other dimension’s limits. There can be situations when the 2D
image is black because the screen is misplaced but one 1D histogram may
still show a beam distribution if in that direction the screen is
positioned correctly. This was the reason why the 1D histograms were
designed not to be directly dependent on the 2D one – this feature
facilitates the troubleshooting of misalignments. On the other hand, this
behavior may lead to confusion if a part of the 2D distribution is outside
of the visible 2D area. In such cases one or two 1D histograms may show a
wider distribution than the one visible on the 2D image. For correcting
this behavior, one can mask the beam by apertures or by selecting the
physical or optical limits of an optical element.
.. tip::
If you do not want to create plot windows (e.g. when they are too many or
when you run xrt on a remote machine) but only want to save plots, you can
use a non-interactive matplotlib backend such as Agg (for PNGs), PDF, SVG
or PS::
matplotlib.use('agg')
Importantly, this must be done at the very top of your script, right after
import matplotlib and before importing anything else.
"""
from __future__ import unicode_literals
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "16 Mar 2017"
import os
import copy
import pickle
import numpy as np
import scipy as sp
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from . import runner
# from runner import runCardVals, runCardProcs
from .backends import raycing
try:
    from .gui.commons import qt
    hasQt = True
except ImportError:
    # Qt bindings are optional; without them plots use plain mpl figures.
    hasQt = False
from matplotlib.figure import Figure
try:  # for Python 3 compatibility:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    unicode = str
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python 2
    unicode = unicode
    basestring = basestring
# otherwise it does not work correctly on my Ubuntu9.10 and mpl 0.99.1.1:
mpl.rcParams['axes.unicode_minus'] = False
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['font.family'] = 'serif'
#mpl.rcParams['font.serif'] = 'cm'
mpl.rcParams['axes.linewidth'] = 0.75
#mpl.rcParams['backend'] = 'Qt5agg'
#mpl.rcParams['backend'] = 'Agg'
#mpl.rcParams['xtick.major.pad'] = '5'
#mpl.rcParams['ytick.major.pad'] = '5'
import matplotlib.pyplot as plt

# Module-level "rc" defaults; users may override them externally (see the
# module docstring and the xrt_logo.py example).
epsHist = 1e-100  # prevents problem with normalization of histograms
# [Sizes and positions of plots]
dpi = 100
xOrigin2d = 80  # all sizes are in pixels
yOrigin2d = 48
space2dto1d = 4
height1d = 84
xspace1dtoE1d = 112
yspace1dtoE1d = 76
heightE1dbar = 10
heightE1d = 84
xSpaceExtraWhenNoEHistogram = 42
xSpaceExtra = 22
ySpaceExtra = 28
# [Sizes and positions of texts]
xlabelpad = 4  # x-axis label to axis
ylabelpad = 4  # y-axis label to axis
xTextPos = 1.02  # 0 to 1 relative to the figure size
yTextPosNrays = 1.0
yTextPosNraysR = 1.32
yTextPosGoodrays = 0.8
yTextPosGoodraysR = 1.1
yTextPosI = 0.58
xTextPosDx = 0.5
yTextPosDx = 1.02
xTextPosDy = 1.05
yTextPosDy = 0.5
xTextPosStatus = 0.999
yTextPosStatus = 0.001
yTextPosNrays1 = 0.88
yTextPosNrays2 = 0.66
yTextPosNrays3 = 0.44
yTextPosNrays4 = 0.22
# [Bins]
defaultBins = 128
defaultPixelPerBin = 2
extraMargin = 4  # bins. Extra margins to histograms when limits are not given.
# [Axis labels]
axisLabelFontSize = 10
defaultXTitle = '$x$'
defaultXUnit = 'mm'
defaultYTitle = '$z$'
defaultYUnit = 'mm'
defaultCTitle = 'energy'
defaultCUnit = 'eV'
defaultFwhmFormatStrForXYAxes = '%.1f'
defaultFwhmFormatStrForCAxis = '%.2f'
# [Development]
colorFactor = 0.85  # 2./3 for red-to-blue
colorSaturation = 0.85
# # end of rc-file ##
def versiontuple(v):
    """Convert a dotted version string into a tuple of ints for comparison.

    Non-digit characters inside a component are dropped ('6rc2' -> 62, as
    before). A component with no digits at all (e.g. the 'x' in '2.0.x' or a
    'dev' suffix) counts as 0 instead of raising ValueError on int('').
    """
    parts = []
    for component in v.split("."):
        digits = ''.join(c for c in component if c.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)
if hasQt:
    class MyQtFigCanvas(qt.FigCanvas):
        """Qt figure canvas that keeps a reference to its xrt plot object."""

        # Emitted with the plot's ordinal number when the window is closed.
        windowClosed = qt.pyqtSignal(int)

        def __init__(self, figure, xrtplot):
            """*figure*: matplotlib Figure; *xrtplot*: the owning XYCPlot."""
            super(MyQtFigCanvas, self).__init__(figure)
            self.xrtplot = xrtplot
class XYCAxis(object):
    u"""
    Contains a generic record structure describing each of the 3 axes:
    X, Y and Color (typ. Energy)."""

    def __init__(
            self, label='', unit='mm', factor=None, data='auto', limits=None,
            offset=0, bins=defaultBins, ppb=defaultPixelPerBin,
            density='histogram', invertAxis=False, outline=0.5,
            fwhmFormatStr=defaultFwhmFormatStrForXYAxes):
        u"""
        *label*: str
            The label of the axis without unit. This label will appear in the
            axis caption and in the FWHM label.

        *unit*: str
            The unit of the axis which will follow the label in parentheses
            and appear in the FWHM value.

        *factor*: float
            Useful in order to match your axis units with the units of the
            ray tracing backend. For instance, the shadow length unit is cm.
            If you want to display the positions as mm: *factor=10*;
            if you want to display energy as keV: *factor=1e-3*.
            Another usage of *factor* is to bring the coordinates of the ray
            tracing backend to the world coordinates. For instance, z-axis in
            shadow is directed off the OE surface. If the OE is faced upside
            down, z is directed downwards. In order to display it upside, set
            minus to *factor*.
            If not specified, *factor* will default to a value that depends
            on *unit*. See :meth:`auto_assign_factor`.

        *data*: int for shadow, otherwise array-like or function object
            shadow:
                zero-based index of columns in the shadow binary files:

                ======  ====================================================
                0       x
                1       y
                2       z
                3       x'
                4       y'
                5       z'
                6       Ex s polariz
                7       Ey s polariz
                8       Ez s polariz
                9       lost ray flag
                10      photon energy
                11      ray index
                12      optical path
                13      phase (s polarization)
                14      phase (p polarization)
                15      x component of the electromagnetic vector (p polar)
                16      y component of the electromagnetic vector (p polar)
                17      z component of the electromagnetic vector (p polar)
                18      empty
                ======  ====================================================
            raycing:
                use the following functions (in the table below) or pass your
                own one. See :mod:`raycing` for more functions, e.g. for the
                polarization properties. Alternatively, you may pass an array
                of the length of the beam arrays.

                =======  ===================================================
                x        raycing.get_x
                y        raycing.get_y
                z        raycing.get_z
                x'       raycing.get_xprime
                z'       raycing.get_zprime
                energy   raycing.get_energy
                =======  ===================================================

            If *data* = 'auto' then *label* is searched for "x", "y", "z",
            "x'", "z'", "energy" and if one of them is found, *data* is
            assigned to the listed above index or function. In raycing backend
            the automatic assignment is additionally implemented for *label*
            containing 'degree' (for degree of polarization), 'circular' (for
            circular polarization rate), 'path', 'incid' or 'theta' (for
            incident angle), 'order' (for grating diffraction order), 's',
            'phi', 'r', 'a' or 'b' (for parametric representation of OE).

        *limits*: 2-list of floats [min, max]
            Axis limits. If None, the *limits* are taken as ``np.min`` and
            ``np.max`` for the corresponding array acquired after the 1st ray
            tracing run. If *limits* == 'symmetric', the limits are forced to
            be symmetric about the origin. Can also be set outside of the
            constructor as, e.g.::

                plot1.xaxis.limits = [-15, 15]

        *offset*: float
            An offset value subtracted from the axis tick labels to be
            displayed separately. It is useful for the energy axis, where the
            band width is most frequently much smaller than the central value.
            Ignored for x and y axes.

            +-----------------+--------------------+
            |   no offset     |   non-zero offset  |
            +=================+====================+
            | |image_offset0| | |image_offset5000| |
            +-----------------+--------------------+

            .. |image_offset0| imagezoom:: _images/offset0.png
               :scale: 50 %
            .. |image_offset5000| imagezoom:: _images/offset5000.png
               :scale: 50 %

        *bins*: int
            Number of bins in the corresponding 1D and 2D histograms.
            See also *ppb* parameter.

        *ppb*: int
            Screen-pixel-per-bin value. The graph arrangement was optimized
            for *bins* * *ppb* = 256. If your *bins* and *ppb* give a very
            different product, the graphs may look ugly (disproportional)
            with overlapping tick labels.

        *density*: 'histogram' or 'kde'
            The way the sample density is calculated: by histogram or by kde
            [KDE]_.

        *invertAxis*: bool
            Inverts the axis direction. Useful for energy axis in energy-
            dispersive images in order to match the colors of the energy
            histogram with the colors of the 2D histogram.

        *outline*: float within [0, 1]
            Specifies the minimum brightness of the outline drawn over the
            1D histogram. The maximum brightness equals 1 at the maximum of
            the 1D histogram.

            +--------------------+--------------------+--------------------+
            |         =0         |        =0.5        |         =1         |
            +====================+====================+====================+
            | |image_outline0.0| | |image_outline0.5| | |image_outline1.0| |
            +--------------------+--------------------+--------------------+

            .. |image_outline0.0| imagezoom:: _images/outline00.png
               :scale: 50 %
            .. |image_outline0.5| imagezoom:: _images/outline05.png
               :scale: 50 %
            .. |image_outline1.0| imagezoom:: _images/outline10.png
               :scale: 50 %

        *fwhmFormatStr*: str
            Python format string for the FWHM value, e.g. '%.2f'. If None, the
            FWHM value is not displayed.
        """
        self.label = label
        self.unit = unit
        if self.label:
            self.displayLabel = self.label
        else:
            self.displayLabel = ''
        if self.unit:
            self.displayLabel += ' (' + self.unit + ')'
        self.factor = factor
        self.data = data
        self.limits = limits
        self.offset = offset
        self.offsetDisplayUnit = self.unit
        self.offsetDisplayFactor = 1
        self.bins = bins
        self.ppb = ppb
        self.pixels = bins * ppb
        self.density = density
        self.extraMargin = extraMargin
        self.invertAxis = invertAxis
        # clamp the outline brightness into its documented [0, 1] range
        self.outline = max(0, min(1, outline))
        self.fwhmFormatStr = fwhmFormatStr
        # running and global maxima of the 1D histogram, updated during
        # accumulation:
        self.max1D = 0
        self.max1D_RGB = 0
        self.globalMax1D = 0
        self.globalMax1D_RGB = 0
        self.useCategory = False

    def auto_assign_data(self, backend):
        """
        Automatically assign data arrays given the axis label."""
        # NOTE: the order of the tests matters; more specific labels ("x'",
        # "z'", "energy") must be matched before the generic "x", "y", "z".
        if "energy" in self.label:
            if backend == 'shadow':
                self.data = 10
            elif backend == 'raycing':
                self.data = raycing.get_energy
        elif "x'" in self.label:
            if backend == 'shadow':
                self.data = 3
            elif backend == 'raycing':
                self.data = raycing.get_xprime
        elif "z'" in self.label:
            if backend == 'shadow':
                self.data = 5
            elif backend == 'raycing':
                self.data = raycing.get_zprime
        elif "x" in self.label:
            if backend == 'shadow':
                self.data = 0
            elif backend == 'raycing':
                self.data = raycing.get_x
        elif "y" in self.label:
            if backend == 'shadow':
                self.data = 1
            elif backend == 'raycing':
                self.data = raycing.get_y
        elif "z" in self.label:
            if backend == 'shadow':
                self.data = 2
            elif backend == 'raycing':
                self.data = raycing.get_z
        # the labels below are implemented for raycing backend only:
        elif "degree" in self.label:
            self.data = raycing.get_polarization_degree
        elif "circular" in self.label:
            self.data = raycing.get_circular_polarization_rate
        elif "incid" in self.label or "theta" in self.label:
            self.data = raycing.get_incidence_angle
        elif "phi" in self.label:
            self.data = raycing.get_phi
        elif "order" in self.label:
            self.data = raycing.get_order
        elif "s" in self.label:
            self.data = raycing.get_s
        elif "path" in self.label:
            self.data = raycing.get_path
        elif "r" in self.label:
            self.data = raycing.get_r
        elif "a" in self.label:
            self.data = raycing.get_a
        elif "b" in self.label:
            self.data = raycing.get_b
        else:
            raise ValueError(
                'cannot auto-assign data for axis "{0}"!'.format(self.label))

    def auto_assign_factor(self, backend):
        """
        Automatically assign factor given the axis label."""
        factor = 1.
        # Raw strings are used for the '$\\mu$...' LaTeX variants below:
        # '\m' is an invalid escape sequence (a warning in modern Python);
        # the resulting string values are unchanged.
        if self.unit in ['keV', ]:
            factor = 1e-3
        elif self.unit in ['mrad', 'meV']:
            factor = 1.0e3
        elif self.unit in [r'$\mu$rad', u'µrad', u'urad']:
            factor = 1.0e6
        else:
            if backend == 'shadow':  # shadow lengths are in cm
                if self.unit in ['m', ]:
                    factor = 1e-2
                elif self.unit in ['mm', ]:
                    factor = 10.
                elif self.unit in [r'$\mu$m', u'µm', 'um']:
                    factor = 1.0e4
                elif self.unit in ['nm', ]:
                    factor = 1.0e7
            elif backend == 'raycing':  # raycing lengths are in mm
                if self.unit in ['m', ]:
                    factor = 1e-3
                elif self.unit in ['mm', ]:
                    factor = 1.
                elif self.unit in [r'$\mu$m', u'µm', 'um']:
                    factor = 1.0e3
                elif self.unit in ['nm', ]:
                    factor = 1.0e6
                elif self.unit in ['pm', ]:
                    factor = 1.0e9
                elif self.unit in ['fm', ]:
                    factor = 1.0e12
                elif self.unit.startswith('deg'):
                    factor = np.degrees(1)
                elif self.unit.startswith('mdeg'):
                    factor = np.degrees(1)*1e3
        self.factor = factor
class XYCPlot(object):
u"""
Container for the accumulated histograms. Besides giving the beam
images, this class provides useful fields like *dx*, *dy*, *dE*
(FWHM), *cx*, *cy*, *cE* (centers) and *intensity* which can be used in
scripts for producing scan-like results."""
def __init__(
self, beam=None, rayFlag=(1,), xaxis=None, yaxis=None, caxis=None,
aspect='equal', xPos=1, yPos=1, ePos=1, title='',
invertColorMap=False, negative=False,
fluxKind='total', fluxUnit='auto',
fluxFormatStr='auto', contourLevels=None, contourColors=None,
contourFmt='%.1f', contourFactor=1., saveName=None,
persistentName=None, oe=None, raycingParam=0,
beamState=None, beamC=None, useQtWidget=False):
u"""
*beam*: str
The beam to be visualized.
In raycing backend:
The key in the dictionary returned by
:func:`~xrt.backends.raycing.run.run_process()`. The values of
that dictionary are beams (instances of
:class:`~xrt.backends.raycing.sources.Beam`).
In shadow backend:
The Shadow output file (``star.NN``, `mirr.NN`` or
``screen.NNMM``). It will also appear in the window caption
unless *title* parameter overrides it.
This parameter is used for the automatic determination of the
backend in use with the corresponding meaning of the next two
parameters. If *beam* contains a dot, shadow backend is assumed.
Otherwise raycing backend is assumed.
*rayFlag*: int or tuple of ints
shadow: 0=lost rays, 1=good rays, 2=all rays.
raycing: a tuple of integer ray states: 1=good, 2=out, 3=over,
4=alive (good + out), -NN = dead at oe number NN (numbering starts
with 1).
*xaxis*, *yaxis*, *caxis*: instance of :class:`XYCAxis` or None.
If None, a default axis is created. If caxis='category' and the
backend is raycing, then the coloring is given by ray category, the
color axis histogram is not displayed and *ePos* is ignored.
.. warning::
The axes contain arrays for the accumulation of histograms. If
you create the axes outside of the plot constructor then make
sure that these are not used for another plot. Otherwise the
histograms will be overwritten!
*aspect*: str or float
Aspect ratio of the 2D histogram, = 'equal', 'auto' or numeric
value (=x/y). *aspect* =1 is the same as *aspect* ='equal'.
*xPos*, *yPos*: int
If non-zero, the corresponding 1D histograms are visible.
*ePos*: int
Flag for specifying the positioning of the color axis histogram:
+-------------------------+---------------------------------------+
| *ePos* =1: at the right | |image_ePos1| |
| (default, as usually | |
| the diffraction plane | |
| is vertical) | |
+-------------------------+---------------------------------------+
| *ePos* =2: at the top | |image_ePos2| |
| (for horizontal | |
| diffraction plane) | |
+-------------------------+---------------------------------------+
| *ePos* =0: no | |image_ePos0| |
| color axis histogram | |
+-------------------------+---------------------------------------+
.. |image_ePos1| imagezoom:: _images/ePos=1.png
:scale: 50 %
.. |image_ePos2| imagezoom:: _images/ePos=2.png
:scale: 50 %
.. |image_ePos0| imagezoom:: _images/ePos=0.png
:scale: 50 %
*title*: str
If non-empty, this string will appear in the window caption,
otherwise the *beam* will be used for this.
*invertColorMap*: bool
Inverts colors in the HSV color map; seen differently, this is a
0.5 circular shift in the color map space. This inversion is
useful in combination with *negative* in order to keep the same
energy coloring both for black and for white images.
*negative*: bool
Useful for printing in order to save black inks.
See also *invertColorMap*.
* =False: black bknd for on-screen presentation
* =True: white bknd for paper printing
The following table demonstrates the combinations of
*invertColorMap* and *negative*:
+-------------+-------------------------+-------------------------+
| | *invertColorMap* | *invertColorMap* |
| | =False | =True |
+=============+=========================+=========================+
| *negative* | |image00| | |image10| |
| =False | | |
+-------------+-------------------------+-------------------------+
| *negative* | |image01| | |image11| |
| =True | | |
+-------------+-------------------------+-------------------------+
.. |image00| imagezoom:: _images/invertColorMap=0_negative=0.png
:scale: 50 %
.. |image01| imagezoom:: _images/invertColorMap=0_negative=1.png
:scale: 50 %
.. |image10| imagezoom:: _images/invertColorMap=1_negative=0.png
:scale: 50 %
.. |image11| imagezoom:: _images/invertColorMap=1_negative=1.png
:scale: 50 %
Note that *negative* inverts only the colors of the graphs, not
the white global background. Use a common graphical editor to
invert the whole picture after doing *negative=True*:
.. imagezoom:: _images/negative=1+fullNegative.png
:scale: 50 %
(such a picture would nicely look on a black journal cover, e.g.
on that of Journal of Synchrotron Radiation ;) )
.. _fluxKind:
*fluxKind*: str
Can begin with 's', 'p', '+-45', 'left-right', 'total', 'power',
'Es', 'Ep' and 'E'. Specifies what kind of flux to use for the
brightness of 2D and for the height of 1D histograms. If it ends
with 'log', the flux scale is logarithmic.
If starts with 'E' then the *field amplitude* or mutual intensity
is considered, not the usual intensity, and accumulated in the 2D
histogram or in a 3D stack:
- If ends with 'xx' or 'zz', the corresponding 2D cuts of mutual
intensity are accumulated in the main 2D array (the one visible
as a 2D histogram). The plot must have equal axes.
- If ends with '4D', the complete mutual intensity is calculated
and stored in *plot.total4D* with the shape
(xaxis.bins*yaxis.bins, xaxis.bins*yaxis.bins).
.. warning::
Be cautious with the size of the mutual intensity object, it is
four-dimensional!
- If ends with 'PCA', the field images are stored in *plot.field3D*
with the shape (repeats, xaxis.bins, yaxis.bins) for further
Principal Component Analysis.
- If without these endings, the field aplitudes are simply summed
in the 2D histogram.
*fluxUnit*: 'auto' or None
If a synchrotron source is used and *fluxUnit* is 'auto', the
flux will be displayed as 'ph/s' or 'W' (if *fluxKind* == 'power').
Otherwise the flux is a unitless number of rays times
transmittivity | reflectivity.
*fluxFormatStr*: str
Format string for representing the flux or power. You can use a
representation with powers of ten by utilizing 'p' as format
specifier, e.g. '%.2p'.
*contourLevels*: sequence
A sequence of levels on the 2D image for drawing the contours, in
[0, 1] range. If None, the contours are not drawn.
*contourColors*: sequence or color
A sequence of colors corresponding to *contourLevels*. A single
color value is applied to all the contours. If None, the colors are
automatic.
*contourFmt*: str
Python format string for contour values.
*contourFactor*: float
Is applied to the levels and is useful in combination with
*contourFmt*, e.g. *contourFmt* = r'%.1f mW/mm$^2$',
*contourFactor* = 1e3.
*saveName*: str or list of str or None
Save file name(s). The file type(s) are given by extensions:
png, ps, svg, pdf. Typically, *saveName* is set outside of the
constructor. For example::
filename = 'filt%04imum' %thick #without extension
plot1.saveName = [filename + '.pdf', filename + '.png']
.. _persistentName:
*persistentName*: str or None
File name for reading and storing the accumulated histograms and
other ancillary data. Ray tracing will resume the histogramming
from the state when the persistent file was written. If the file
does not exist yet, the histograms are initialized to zeros. The
persistent file is rewritten when ray tracing is completed and
the number of repeats > 0.
.. warning::
Be careful when you use it: if you intend to start from zeros,
make sure that this option is switched off or the pickle files
do not exist! Otherwise you do resume, not really start anew.
if *persistentName* ends with '.mat', a Matlab file is generated.
*oe*: instance of an optical element or None
If supplied, the rectangular or circular areas of the optical
surfaces or physical surfaces, if the optical surfaces are not
specified, will be overdrawn. Useful with raycing backend for
footprint images.
*raycingParam*: int
Used together with the *oe* parameter above for drawing footprint
envelopes. If =2, the limits of the second crystal of DCM are taken
for drawing the envelope; if =1000, all facets of a diced crystal
are displayed.
*beamState*: str
Used in raycing backend. If not None, gives another beam that
determines the state (good, lost etc.) instead of the state given
by *beam*. This may be used to visualize the *incoming* beam but
use the states of the *outgoing* beam, so that you see how the beam
upstream of the optical element will be masked by it. See the
examples for capillaries.
*beamC*: str
The same as *beamState* but refers to colors (when not of
'category' type).
"""
if not hasQt:
useQtWidget = False
if not useQtWidget:
plt.ion()
self.colorSaturation = colorSaturation
self.beam = beam # binary shadow image: star, mirr or screen
if beam is None:
self.backend = 'raycing'
elif '.' in beam:
self.backend = 'shadow'
elif ('dummy' in beam) or (beam == ''):
self.backend = 'dummy'
elif isinstance(rayFlag, (tuple, list)):
self.backend = 'raycing'
else:
self.backend = 'dummy'
self.beamState = beamState
self.beamC = beamC
self.rayFlag = rayFlag
self.fluxKind = fluxKind
self.fluxUnit = fluxUnit
if xaxis is None:
self.xaxis = XYCAxis(defaultXTitle, defaultXUnit)
else:
self.xaxis = xaxis
if yaxis is None:
self.yaxis = XYCAxis(defaultYTitle, defaultYUnit)
else:
self.yaxis = yaxis
if (caxis is None) or isinstance(caxis, basestring):
self.caxis = XYCAxis(defaultCTitle, defaultCUnit, factor=1.,)
self.caxis.fwhmFormatStr = defaultFwhmFormatStrForCAxis
if isinstance(caxis, basestring):
self.caxis.useCategory = True
ePos = 0
else:
self.caxis = caxis
if self.backend != 'dummy':
for axis in self.xaxis, self.yaxis, self.caxis:
if axis.data == 'auto':
axis.auto_assign_data(self.backend)
if axis.factor is None:
axis.auto_assign_factor(self.backend)
self.reset_bins2D()
if isinstance(aspect, (int, float)):
if aspect <= 0:
aspect = 1.
self.aspect = aspect
self.dpi = dpi
self.ePos = ePos # Position of E histogram, 1=right, 2=top, 0=none
self.negative = negative
if self.negative:
facecolor = 'w' # white
else:
facecolor = 'k' # black
# MatplotlibDeprecationWarning: The axisbg attribute was deprecated in
# version 2.0. Use facecolor instead.
kwmpl = {}
if versiontuple(mpl.__version__) >= versiontuple("2.0.0"):
kwmpl['facecolor'] = facecolor
else:
kwmpl['axisbg'] = facecolor
self.invertColorMap = invertColorMap
self.utilityInvertColorMap = False
self.fluxFormatStr = fluxFormatStr
self.saveName = saveName
self.persistentName = persistentName
self.cx, self.dx = 0, 0
self.cy, self.dy = 0, 0
self.cE, self.dE = 0, 0
xFigSize = float(xOrigin2d + self.xaxis.pixels + space2dto1d +
height1d + xSpaceExtra)
yFigSize = float(yOrigin2d + self.yaxis.pixels + space2dto1d +
height1d + ySpaceExtra)
if self.ePos == 1:
xFigSize += xspace1dtoE1d + heightE1d + heightE1dbar
elif self.ePos == 2:
yFigSize += yspace1dtoE1d + heightE1d + heightE1dbar
if self.ePos != 1:
xFigSize += xSpaceExtraWhenNoEHistogram
if useQtWidget:
self.fig = Figure(figsize=(xFigSize/dpi, yFigSize/dpi), dpi=dpi)
else:
self.fig = plt.figure(figsize=(xFigSize/dpi, yFigSize/dpi),
dpi=dpi)
self.local_size_inches = self.fig.get_size_inches()
self.fig.delaxes(self.fig.gca())
if title != '':
self.title = title
elif isinstance(beam, basestring):
self.title = beam
else:
self.title = ' '
if useQtWidget:
self.canvas = MyQtFigCanvas(figure=self.fig, xrtplot=self)
self.fig.canvas.set_window_title(self.title)
if plt.get_backend().lower() in (
x.lower() for x in mpl.rcsetup.non_interactive_bk):
xExtra = 0 # mpl backend-dependent (don't know why) pixel sizes
yExtra = 0 # mpl backend-dependent (don't know why) pixel sizes
else: # interactive backends:
if True: # runner.runCardVals.repeats > 1:
xExtra = 0
yExtra = 2
else:
xExtra = 0
yExtra = 0
frameon = True
rect2d = [xOrigin2d / xFigSize, yOrigin2d / yFigSize,
(self.xaxis.pixels-1+xExtra) / xFigSize,
(self.yaxis.pixels-1+yExtra) / yFigSize]
self.ax2dHist = self.fig.add_axes(
rect2d, aspect=aspect, xlabel=self.xaxis.displayLabel,
ylabel=self.yaxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax2dHist.xaxis.labelpad = xlabelpad
self.ax2dHist.yaxis.labelpad = ylabelpad
rect1dX = copy.deepcopy(rect2d)
rect1dX[1] = rect2d[1] + rect2d[3] + space2dto1d/yFigSize
rect1dX[3] = height1d / yFigSize
self.ax1dHistX = self.fig.add_axes(
rect1dX, sharex=self.ax2dHist, autoscale_on=False, frameon=frameon,
visible=(xPos != 0), **kwmpl)
rect1dY = copy.deepcopy(rect2d)
rect1dY[0] = rect2d[0] + rect2d[2] + space2dto1d/xFigSize
rect1dY[2] = height1d / xFigSize
self.ax1dHistY = self.fig.add_axes(
rect1dY, sharey=self.ax2dHist, autoscale_on=False, frameon=frameon,
visible=(yPos != 0), **kwmpl)
# make some labels invisible
pset = plt.setp
pset(
self.ax1dHistX.get_xticklabels() +
self.ax1dHistX.get_yticklabels() +
self.ax1dHistY.get_xticklabels() +
self.ax1dHistY.get_yticklabels(),
visible=False)
self.ax1dHistX.set_yticks([])
self.ax1dHistY.set_xticks([])
self.ax1dHistX.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter(
useOffset=False))
self.ax1dHistY.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter(
useOffset=False))
# for tick in (self.ax2dHist.xaxis.get_major_ticks() + \
# self.ax2dHist.yaxis.get_major_ticks()):
# tick.label1.set_fontsize(axisLabelFontSize)
self.ax1dHistXOffset = self.fig.text(
rect1dY[0]+rect1dY[2], 0.01, '', ha='right', va='bottom',
color='gray') # , fontweight='bold')
self.ax1dHistYOffset = self.fig.text(
0.01, rect1dX[1]+rect1dX[3], '', rotation=90, ha='left', va='top',
color='gray') # , fontweight='bold')
if self.ePos == 1: # right
rect1dE = copy.deepcopy(rect1dY)
rect1dE[0] = rect1dY[0] + rect1dY[2] + xspace1dtoE1d/xFigSize
rect1dE[2] = heightE1dbar / xFigSize
rect1dE[3] *= float(self.caxis.pixels) / self.yaxis.pixels
self.ax1dHistEbar = self.fig.add_axes(
rect1dE, ylabel=self.caxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax1dHistEbar.yaxis.labelpad = xlabelpad
self.ax1dHistEOffset = self.fig.text(
rect1dE[0], rect1dE[1]+rect1dE[3], '', ha='left', va='bottom',
color='g') # , fontweight='bold')
rect1dE[0] += rect1dE[2]
rect1dE[2] = heightE1d / xFigSize
self.ax1dHistE = self.fig.add_axes(
rect1dE, sharey=self.ax1dHistEbar, autoscale_on=False,
frameon=frameon, **kwmpl)
pset(
self.ax1dHistEbar.get_xticklabels() +
self.ax1dHistE.get_xticklabels() +
self.ax1dHistE.get_yticklabels(), visible=False)
pset(self.ax1dHistEbar, xticks=())
self.ax1dHistE.yaxis.set_major_formatter(
mpl.ticker.ScalarFormatter(useOffset=False))
if self.caxis.limits is not None:
self.ax1dHistE.set_ylim(self.caxis.limits)
self.ax1dHistE.set_xticks([])
elif self.ePos == 2: # top
rect1dE = copy.deepcopy(rect1dX)
rect1dE[1] = rect1dX[1] + rect1dX[3] + yspace1dtoE1d/yFigSize
rect1dE[3] = heightE1dbar / yFigSize
rect1dE[2] *= float(self.caxis.pixels) / self.xaxis.pixels
self.ax1dHistEbar = self.fig.add_axes(
rect1dE, xlabel=self.caxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax1dHistEbar.xaxis.labelpad = xlabelpad
self.ax1dHistEOffset = self.fig.text(
rect1dE[0]+rect1dE[2]+0.01, rect1dE[1]-0.01, '',
ha='left', va='top', color='g') # , fontweight='bold')
rect1dE[1] += rect1dE[3]
rect1dE[3] = heightE1d / yFigSize
self.ax1dHistE = self.fig.add_axes(
rect1dE, sharex=self.ax1dHistEbar, autoscale_on=False,
frameon=frameon, **kwmpl)
pset(
self.ax1dHistEbar.get_yticklabels() +
self.ax1dHistE.get_yticklabels() +
self.ax1dHistE.get_xticklabels(), visible=False)
pset(self.ax1dHistEbar, yticks=())
self.ax1dHistE.xaxis.set_major_formatter(
mpl.ticker.ScalarFormatter(useOffset=False))
if self.caxis.limits is not None:
self.ax1dHistE.set_xlim(self.caxis.limits)
self.ax1dHistE.set_yticks([])
allAxes = [self.ax1dHistX, self.ax1dHistY, self.ax2dHist]
if self.ePos != 0:
allAxes.append(self.ax1dHistE)
allAxes.append(self.ax1dHistEbar)
for ax in allAxes:
for axXY in (ax.xaxis, ax.yaxis):
for line in axXY.get_ticklines():
line.set_color('grey')
mplTxt = self.ax1dHistX.text if useQtWidget else plt.text
if self.ePos == 1:
self.textDE = mplTxt(
xTextPosDy, yTextPosDy, ' ', rotation='vertical',
transform=self.ax1dHistE.transAxes, ha='left', va='center')
elif self.ePos == 2:
self.textDE = mplTxt(
xTextPosDx, yTextPosDx, ' ',
transform=self.ax1dHistE.transAxes, ha='center', va='bottom')
self.nRaysAll = np.long(0)
self.nRaysAllRestored = np.long(-1)
self.intensity = 0.
transform = self.ax1dHistX.transAxes
self.textGoodrays = None
self.textI = None
self.power = 0.
self.flux = 0.
self.contourLevels = contourLevels
self.contourColors = contourColors
self.contourFmt = contourFmt
self.contourFactor = contourFactor
self.displayAsAbsorbedPower = False
self.textNrays = None
if self.backend == 'shadow' or self.backend == 'dummy':
self.textNrays = mplTxt(
xTextPos, yTextPosNrays, ' ', transform=transform, ha='left',
va='top')
self.nRaysNeeded = np.long(0)
if self.rayFlag != 2:
self.textGoodrays = mplTxt(
xTextPos, yTextPosGoodrays, ' ', transform=transform,
ha='left', va='top')
self.textI = mplTxt(
xTextPos, yTextPosI, ' ', transform=transform, ha='left',
va='top')
elif self.backend == 'raycing':
# =0: ignored, =1: good,
# =2: reflected outside of working area, =3: transmitted without
# intersection
# =-NN: lost (absorbed) at OE#NN-OE numbering starts from 1 !!!
# If NN>1000 then
# the slit with ordinal number NN-1000 is meant.
self.nRaysAlive = np.long(0)
self.nRaysGood = np.long(0)
self.nRaysOut = np.long(0)
self.nRaysOver = np.long(0)
self.nRaysDead = np.long(0)
self.nRaysAccepted = np.long(0)
self.nRaysAcceptedE = 0.
self.nRaysSeeded = np.long(0)
self.nRaysSeededI = 0.
self.textNrays = mplTxt(
xTextPos, yTextPosNraysR, ' ', transform=transform, ha='left',
va='top')
self.textGood = None
self.textOut = None
self.textOver = None
self.textAlive = None
self.textDead = None
if 1 in self.rayFlag:
self.textGood = mplTxt(
xTextPos, yTextPosNrays1, ' ', transform=transform,
ha='left', va='top')
if 2 in self.rayFlag:
self.textOut = mplTxt(
xTextPos, yTextPosNrays2, ' ', transform=transform,
ha='left', va='top')
if 3 in self.rayFlag:
self.textOver = mplTxt(
xTextPos, yTextPosNrays3, ' ', transform=transform,
ha='left', va='top')
if 4 in self.rayFlag:
self.textAlive = mplTxt(
xTextPos, yTextPosGoodraysR, ' ', transform=transform,
ha='left', va='top')
if not self.caxis.useCategory:
self.textI = mplTxt(
xTextPos, yTextPosNrays4, ' ', transform=transform,
ha='left', va='top')
else:
if (np.array(self.rayFlag) < 0).sum() > 0:
self.textDead = mplTxt(
xTextPos, yTextPosNrays4, ' ', transform=transform,
ha='left', va='top')
self.textDx = mplTxt(
xTextPosDx, yTextPosDx, ' ', transform=self.ax1dHistX.transAxes,
ha='center', va='bottom')
self.textDy = mplTxt(
xTextPosDy, yTextPosDy, ' ', rotation='vertical',
transform=self.ax1dHistY.transAxes, ha='left', va='center')
self.textStatus = mplTxt(
xTextPosStatus, yTextPosStatus, '', transform=self.fig.transFigure,
ha='right', va='bottom', fontsize=9)
self.textStatus.set_color('r')
self.ax1dHistX.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax1dHistY.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
if self.ePos != 0:
self.ax1dHistE.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax1dHistEbar.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax2dHist.imshow(
np.zeros((2, 2, 3)), aspect=self.aspect, interpolation='nearest',
origin='lower', figure=self.fig)
self.contours2D = None
self.oe = oe
self.oeSurfaceLabels = []
self.raycingParam = raycingParam
self.draw_footprint_area()
if self.xaxis.limits is not None:
if not isinstance(self.xaxis.limits, str):
self.ax2dHist.set_xlim(self.xaxis.limits)
self.ax1dHistX.set_xlim(self.xaxis.limits)
if self.yaxis.limits is not None:
if not isinstance(self.yaxis.limits, str):
self.ax2dHist.set_ylim(self.yaxis.limits)
self.ax1dHistY.set_ylim(self.yaxis.limits)
self.cidp = self.fig.canvas.mpl_connect(
'button_press_event', self.on_press)
if not useQtWidget:
plt.ioff()
self.fig.canvas.draw()
def reset_bins2D(self):
if self.fluxKind.startswith('E'):
dtype = np.complex128
else:
dtype = np.float64
self.total2D = np.zeros((self.yaxis.bins, self.xaxis.bins),
dtype=dtype)
self.total2D_RGB = np.zeros((self.yaxis.bins, self.xaxis.bins, 3))
self.max2D_RGB = 0
self.globalMax2D_RGB = 0
self.size2D = self.yaxis.bins * self.xaxis.bins
self.is4D = self.fluxKind.lower().endswith('4d')
if self.is4D:
self.total4D = np.zeros((self.size2D, self.size2D), dtype=dtype)
self.isPCA = self.fluxKind.lower().endswith('pca')
if self.isPCA:
self.total4D = []
for ax in [self.xaxis, self.yaxis, self.caxis]:
if isinstance(ax, XYCAxis):
ax.binEdges = np.zeros(ax.bins + 1)
ax.total1D = np.zeros(ax.bins)
ax.total1D_RGB = np.zeros((ax.bins, 3))
def update_user_elements(self):
return # 'user message'
def clean_user_elements(self):
pass
def on_press(self, event):
"""
Defines the right button click event for stopping the loop.
"""
if event.button == 3:
runner.runCardVals.stop_event.set()
self.textStatus.set_text("stopping ...")
def timer_callback(self, evt=None):
"""
This code will be executed on every timer tick. We have to start
:meth:`runner.dispatch_jobs` here as otherwise we cannot force the
redrawing.
"""
if self.areProcessAlreadyRunning:
return
self.areProcessAlreadyRunning = True
runner.dispatch_jobs()
def set_axes_limits(self, xmin, xmax, ymin, ymax, emin, emax):
"""
Used in multiprocessing for automatic limits of the 3 axes: x, y and
energy (caxis). It is meant only for the 1st ray tracing run.
"""
# if (self.xaxis.limits is None) or isinstance(self.xaxis.limits, str):
# the check is not needed: even if the limits have been already set, they may
# change due to *aspect*; this is checked in :mod:`multipro`.
self.xaxis.limits = [xmin, xmax]
self.yaxis.limits = [ymin, ymax]
self.caxis.limits = [emin, emax]
    def draw_footprint_area(self):
        """
        Useful with raycing backend for footprint images.

        Draws the outline patch(es) of the optical element onto the 2D
        histogram and labels each surface. The patches and label artists are
        created once; on subsequent calls only the label texts are refreshed.
        """
        if self.oe is None:
            return
        if self.oe.surface is None:
            return
        # NOTE(review): *basestring* (and *unicode* below) are Python-2
        # names; presumably aliased for Python 3 elsewhere in this module.
        if isinstance(self.oe.surface, basestring):
            surface = self.oe.surface,  # a single name -> 1-tuple
        else:
            surface = self.oe.surface
        if len(self.oeSurfaceLabels) > 0:
            # labels already exist: just refresh their texts and quit
            for isurf, surf in enumerate(surface):
                self.oeSurfaceLabels[isurf].set_text(surf)
                return
        r = [0, 0, 0, 0]  # [xMin, xMax, yMin, yMax] of the drawn outline
        if self.raycingParam == 2:  # the second crystal of DCM
            limsPhys = self.oe.limPhysX2, self.oe.limPhysY2
            limsOpt = self.oe.limOptX2, self.oe.limOptY2
        elif (self.raycingParam >= 1000) and hasattr(self.oe, "xStep"):
            # all facets of a diced crystal
            if self.oe.limPhysX[1] == np.inf:
                return
            if self.oe.limPhysY[1] == np.inf:
                return
            if self.xaxis.limits is None:
                return
            if self.yaxis.limits is None:
                return
            # facet index range limited by both the physical aperture and
            # the currently visible axis limits
            ixMin = int(round(max(self.oe.limPhysX[0], self.xaxis.limits[0]) /
                              self.oe.xStep))
            ixMax = int(round(min(self.oe.limPhysX[1], self.xaxis.limits[1]) /
                              self.oe.xStep))
            iyMin = int(round(max(self.oe.limPhysY[0], self.yaxis.limits[0]) /
                              self.oe.yStep))
            iyMax = int(round(min(self.oe.limPhysY[1], self.yaxis.limits[1]) /
                              self.oe.yStep))
            surface = []
            limFacetXMin, limFacetXMax = [], []
            limFacetYMin, limFacetYMax = [], []
            for ix in range(ixMin, ixMax+1):
                for iy in range(iyMin, iyMax+1):
                    surface.append('')  # facets get no text label
                    cx = ix * self.oe.xStep  # facet center
                    cy = iy * self.oe.yStep
                    dxHalf = self.oe.dxFacet / 2
                    dyHalf = self.oe.dyFacet / 2
                    # clip each facet rectangle to the physical aperture
                    limFacetXMin.append(max(cx-dxHalf, self.oe.limPhysX[0]))
                    limFacetXMax.append(min(cx+dxHalf, self.oe.limPhysX[1]))
                    limFacetYMin.append(max(cy-dyHalf, self.oe.limPhysY[0]))
                    limFacetYMax.append(min(cy+dyHalf, self.oe.limPhysY[1]))
            limsPhys = \
                (limFacetXMin, limFacetXMax), (limFacetYMin, limFacetYMax)
            limsOpt = None, None
        else:
            limsPhys = self.oe.limPhysX, self.oe.limPhysY
            limsOpt = self.oe.limOptX, self.oe.limOptY
        for isurf, surf in enumerate(surface):
            # optical limits, when given, take precedence over physical ones
            for ilim1, ilim2, limPhys, limOpt in zip(
                    (0, 2), (1, 3), limsPhys, limsOpt):
                if limOpt is not None:
                    if raycing.is_sequence(limOpt[0]):
                        r[ilim1], r[ilim2] = limOpt[0][isurf], limOpt[1][isurf]
                    else:
                        r[ilim1], r[ilim2] = limOpt[0], limOpt[1]
                else:
                    if raycing.is_sequence(limPhys[0]):
                        r[ilim1], r[ilim2] = \
                            limPhys[0][isurf], limPhys[1][isurf]
                    else:
                        r[ilim1], r[ilim2] = limPhys[0], limPhys[1]
            # convert to the displayed units of the plot axes
            r[0] *= self.xaxis.factor
            r[1] *= self.xaxis.factor
            r[2] *= self.yaxis.factor
            r[3] *= self.yaxis.factor
            if isinstance(self.oe.shape, (str, unicode)):
                if self.oe.shape.startswith('ro') and\
                        (self.raycingParam < 1000):
                    envelope = mpl.patches.Circle(
                        ((r[1]+r[0])*0.5, (r[3]+r[2])*0.5), (r[1]-r[0])*0.5,
                        fc="#aaaaaa", lw=0, alpha=0.25)
                elif self.oe.shape.startswith('rect') or\
                        (self.raycingParam >= 1000):
                    envelope = mpl.patches.Rectangle(
                        (r[0], r[2]), r[1] - r[0], r[3] - r[2],
                        fc="#aaaaaa", lw=0, alpha=0.25)
            elif isinstance(self.oe.shape, list):
                envelope = mpl.patches.Polygon(self.oe.shape, closed=True,
                                               fc="#aaaaaa", lw=0, alpha=0.25)
            self.ax2dHist.add_patch(envelope)
            if self.raycingParam < 1000:
                # label the surface at the bottom edge of the outline but not
                # below the visible axis range
                if self.yaxis.limits is not None:
                    yTextPos = max(r[2], self.yaxis.limits[0])
                else:
                    yTextPos = r[2]
                osl = self.ax2dHist.text(
                    (r[0]+r[1]) * 0.5, yTextPos, surf, ha='center',
                    va='top', color='w')
                self.oeSurfaceLabels.append(osl)
    def plot_hist1d(self, what_axis_char):
        """Plots the specified 1D histogram as imshow and calculates FWHM with
        showing the ends of the FWHM bar.

        Parameters:
            *what_axis_char*: str [ 'x' | 'y' | 'c' ]
                defines the axis

        Returns:
            *center*, *fwhm*: floats
                the center and fwhm values for later displaying.
        """
        # pick the axis descriptor, the target mpl axes and the drawing
        # orientation for the requested histogram
        if what_axis_char == 'x':
            axis = self.xaxis
            graph = self.ax1dHistX
            orientation = 'horizontal'
            histoPixelHeight = height1d
            offsetText = self.ax1dHistXOffset
        elif what_axis_char == 'y':
            axis = self.yaxis
            graph = self.ax1dHistY
            orientation = 'vertical'
            histoPixelHeight = height1d
            offsetText = self.ax1dHistYOffset
        elif what_axis_char == 'c':
            axis = self.caxis
            graph = self.ax1dHistE
            if self.ePos == 1:
                orientation = 'vertical'
            elif self.ePos == 2:
                orientation = 'horizontal'
            offsetText = self.ax1dHistEOffset
            histoPixelHeight = heightE1d
        t1D = axis.total1D
        axis.max1D = float(np.max(t1D))
        if axis.max1D > epsHist:
            # on repeated passes normalize to the global maximum so that
            # consecutive repeats remain mutually comparable
            if runner.runCardVals.passNo > 0:
                mult = 1.0 / axis.globalMax1D
            else:
                mult = 1.0 / axis.max1D
            xx = t1D * mult
        else:
            xx = t1D
        if runner.runCardVals.passNo > 0:
            xxMaxHalf = float(np.max(xx)) * 0.5  # for calculating FWHM
        else:
            xxMaxHalf = 0.5
        t1D_RGB = axis.total1D_RGB
        axis.max1D_RGB = float(np.max(t1D_RGB))
        if axis.max1D_RGB > epsHist:
            if runner.runCardVals.passNo > 1:
                mult = 1.0 / axis.globalMax1D_RGB
            else:
                mult = 1.0 / axis.max1D_RGB
            xxRGB = t1D_RGB * mult
        else:
            xxRGB = t1D_RGB
        if orientation[0] == 'h':
            # paint each histogram column bottom-up to its (scaled) height
            map2d = np.zeros((histoPixelHeight, len(xx), 3))
            for ix, cx in enumerate(xx):
                maxPixel = int(round((histoPixelHeight-1) * cx))
                if 0 <= maxPixel <= (histoPixelHeight-1):
                    map2d[0:maxPixel, ix, :] = xxRGB[ix, :]
                    if axis.outline:
                        # brighten the topmost pixel to outline the profile
                        maxRGB = np.max(xxRGB[ix, :])
                        if maxRGB > 1e-20:
                            scaleFactor = \
                                1 - axis.outline + axis.outline/maxRGB
                            map2d[maxPixel-1, ix, :] *= scaleFactor
            extent = None
            if (axis.limits is not None) and\
                    (not isinstance(axis.limits, str)):
                ll = [l-axis.offset for l in axis.limits]
                extent = [ll[0], ll[1], 0, 1]
        elif orientation[0] == 'v':
            map2d = np.zeros((len(xx), histoPixelHeight, 3))
            for ix, cx in enumerate(xx):
                maxPixel = int(round((histoPixelHeight-1) * cx))
                if 0 <= maxPixel <= (histoPixelHeight-1):
                    map2d[ix, 0:maxPixel, :] = xxRGB[ix, :]
                    if axis.outline:
                        maxRGB = np.max(xxRGB[ix, :])
                        if maxRGB > 1e-20:
                            scaleFactor = \
                                1 - axis.outline + axis.outline/maxRGB
                            map2d[ix, maxPixel-1, :] *= scaleFactor
            extent = None
            if (axis.limits is not None) and \
                    not (isinstance(axis.limits, str)):
                ll = [l-axis.offset for l in axis.limits]
                extent = [0, 1, ll[0], ll[1]]
        if self.negative:
            map2d = 1 - map2d
        if self.utilityInvertColorMap:
            # rotate hue by half a turn in HSV space (wraps around at 1)
            map2d = mpl.colors.rgb_to_hsv(map2d)
            map2d[:, :, 0] -= 0.5
            map2d[map2d < 0] += 1
            map2d = mpl.colors.hsv_to_rgb(map2d)
        graph.images[0].set_data(map2d)
        if extent is not None:
            graph.images[0].set_extent(extent)
        del graph.lines[:]  # otherwise it accumulates the FWHM lines
        if axis.max1D > 0:
            # FWHM from the outermost bins that reach half maximum
            args = np.argwhere(xx >= xxMaxHalf)
            iHistFWHMlow = np.min(args)
            iHistFWHMhigh = np.max(args) + 1
            histFWHMlow = axis.binEdges[iHistFWHMlow] - axis.offset
            histFWHMhigh = axis.binEdges[iHistFWHMhigh] - axis.offset
            if axis.fwhmFormatStr is not None:
                # mark the two ends of the FWHM bar with '+' symbols
                if orientation[0] == 'h':
                    graph.plot([histFWHMlow, histFWHMhigh],
                               [xxMaxHalf, xxMaxHalf], '+', color='grey')
                elif orientation[0] == 'v':
                    graph.plot([xxMaxHalf, xxMaxHalf],
                               [histFWHMlow, histFWHMhigh], '+', color='grey')
        else:
            histFWHMlow = 0
            histFWHMhigh = 0
        if axis.offset:
            # limits are displayed relative to the offset; show the offset
            # value in its own text element
            ll = [l-axis.offset for l in axis.limits]
            offsetText.set_text('{0}{1:g} {2}'.format(
                '+' if axis.offset > 0 else '',
                axis.offset*axis.offsetDisplayFactor, axis.offsetDisplayUnit))
            offsetText.set_visible(True)
        else:
            ll = axis.limits
            offsetText.set_visible(False)
        if orientation[0] == 'h':
            if not isinstance(axis.limits, str):
                graph.set_xlim(ll)
            graph.set_ylim([0, 1])
        elif orientation[0] == 'v':
            graph.set_xlim([0, 1])
            if not isinstance(axis.limits, str):
                graph.set_ylim(ll)
        # intensity-weighted center of mass of the histogram
        axis.binCenters = (axis.binEdges[:-1]+axis.binEdges[1:]) * 0.5
        weighted1D = axis.total1D * axis.binCenters
        xxAve = axis.total1D.sum()
        if xxAve != 0:
            xxAve = weighted1D.sum() / xxAve
        return xxAve, histFWHMhigh - histFWHMlow
    def plot_colorbar(self):
        """
        Plots a color bar adjacent to the caxis 1D histogram.
        """
        # hue ramp over the full color range, one pixel per caxis pixel
        a = np.linspace(0, colorFactor, self.caxis.pixels, endpoint=True)
        a = np.asarray(a).reshape(1, -1)
        if self.invertColorMap:
            # rotate hue by half a turn (wraps around at 1)
            a -= 0.5
            a[a < 0] += 1
        if self.caxis.limits is None:
            return
        eMin, eMax = [l-self.caxis.offset for l in self.caxis.limits]
        a = np.vstack((a, a))
        if self.ePos == 1:
            # vertical energy axis: transpose the bar
            a = a.T
            extent = [0, 1, eMin, eMax]
        else:
            extent = [eMin, eMax, 0, 1]
        # HSV -> RGB with constant saturation and full value
        a = np.dstack(
            (a, np.ones_like(a) * self.colorSaturation, np.ones_like(a)))
        a = mpl.colors.hsv_to_rgb(a)
        if self.negative:
            a = 1 - a
        self.ax1dHistEbar.images[0].set_data(a)
        self.ax1dHistEbar.images[0].set_extent(extent)
        if self.caxis.invertAxis:
            if self.ePos == 2:
                self.ax1dHistEbar.set_xlim(self.ax1dHistEbar.get_xlim()[::-1])
            elif self.ePos == 1:
                self.ax1dHistEbar.set_ylim(self.ax1dHistEbar.get_ylim()[::-1])
    def plot_hist2d(self):
        """
        Plots the 2D histogram as imshow.
        """
        tRGB = self.total2D_RGB
        self.max2D_RGB = float(np.max(tRGB))
        if self.max2D_RGB > 0:
            # on repeated passes normalize to the global maximum so that
            # consecutive repeats remain mutually comparable
            if runner.runCardVals.passNo > 1:
                mult = 1.0 / self.globalMax2D_RGB
            else:
                mult = 1.0 / self.max2D_RGB
            xyRGB = tRGB * mult
        else:
            xyRGB = tRGB
        if self.negative:
            xyRGB = 1 - xyRGB
        if self.utilityInvertColorMap:
            # rotate hue by half a turn in HSV space (wraps around at 1)
            xyRGB = mpl.colors.rgb_to_hsv(xyRGB)
            xyRGB[:, :, 0] -= 0.5
            xyRGB[xyRGB < 0] += 1
            xyRGB = mpl.colors.hsv_to_rgb(xyRGB)
        # clip numerical noise to the valid RGB range
        xyRGB[xyRGB < 0] = 0
        xyRGB[xyRGB > 1] = 1
        # #test:
        # xyRGB[:,:,:]=0
        # xyRGB[1::2,1::2,0]=1
        extent = None
        if (self.xaxis.limits is not None) and (self.yaxis.limits is not None):
            if (not isinstance(self.xaxis.limits, str)) and\
                    (not isinstance(self.yaxis.limits, str)):
                extent = [self.xaxis.limits[0]-self.xaxis.offset,
                          self.xaxis.limits[1]-self.xaxis.offset,
                          self.yaxis.limits[0]-self.yaxis.offset,
                          self.yaxis.limits[1]-self.yaxis.offset]
        self.ax2dHist.images[0].set_data(xyRGB)
        if extent is not None:
            self.ax2dHist.images[0].set_extent(extent)
        if self.xaxis.invertAxis:
            self.ax2dHist.set_xlim(self.ax2dHist.get_xlim()[::-1])
        if self.yaxis.invertAxis:
            self.ax2dHist.set_ylim(self.ax2dHist.get_ylim()[::-1])
        if self.contourLevels is not None:
            # remove the contours of the previous draw before re-contouring
            if self.contours2D is not None:
                for c in self.contours2D.collections:
                    try:
                        self.ax2dHist.collections.remove(c)
                    except ValueError:
                        pass
                self.ax2dHist.artists = []
            dx = float(self.xaxis.limits[1]-self.xaxis.limits[0]) /\
                self.xaxis.bins
            dy = float(self.yaxis.limits[1]-self.yaxis.limits[0]) /\
                self.yaxis.bins
            if dx == 0:
                dx = 1.
            if dy == 0:
                dy = 1.
            # bin-center grid for contouring
            x = np.linspace(
                self.xaxis.limits[0] + dx/2, self.xaxis.limits[1] - dx/2,
                self.xaxis.bins)
            y = np.linspace(
                self.yaxis.limits[0] + dy/2, self.yaxis.limits[1] - dy/2,
                self.yaxis.bins)
            X, Y = np.meshgrid(x, y)
            norm = self.nRaysAll * dx * dy
            if norm > 0:
                Z = copy.copy(self.total2D) / norm
                # smooth the density before contouring
                Z = sp.ndimage.filters.gaussian_filter(Z, 3, mode='nearest')\
                    * self.contourFactor
                self.contourMax = np.max(Z)
                if True:  # self.contourMax > 1e-4:
                    contourLevels =\
                        [l*self.contourMax for l in self.contourLevels]
                    self.contours2D = self.ax2dHist.contour(
                        X, Y, Z, levels=contourLevels,
                        colors=self.contourColors)
                    self.ax2dHist.clabel(
                        self.contours2D, fmt=self.contourFmt, inline=True,
                        fontsize=10)
def textFWHM(self, axis, textD, average, hwhm):
"""Updates the text field that has average of the *axis* plus-minus the
HWHM value."""
deltaStr = axis.label + '$ = $' + axis.fwhmFormatStr +\
r'$\pm$' + axis.fwhmFormatStr + ' %s'
textD.set_text(deltaStr % (average, hwhm, axis.unit))
def _pow10(self, x, digits=1):
"""
Returns a string representation of the scientific notation of the given
number formatted for use with LaTeX or Mathtext, with specified number
of significant decimal digits.
"""
x = float(x)
if (x <= 0) or np.isnan(x).any():
return '0'
exponent = int(np.floor(np.log10(abs(x))))
coeff = np.round(x / float(10**exponent), digits)
return r"{0:.{2}f}$\cdot$10$^{{{1:d}}}$".format(
coeff, exponent, digits)
# def _round_to_n(self, x, n):
# """Round x to n significant figures"""
# return round(x, -int(np.floor(np.sign(x) * np.log10(abs(x)))) + n)
#
# def _str_fmt10(self, x, n=2):
# " Format x into nice Latex rounding to n"
# if x <= 0: return "0"
# try:
# power = int(np.log10(self._round_to_n(x, 0)))
# f_SF = self._round_to_n(x, n) * pow(10, -power)
# except OverflowError:
# return "0"
# return r"{0}$\cdot$10$^{{{1}}}$".format(f_SF, power)
def _get_flux(self):
self.flux = float(self.intensity) / self.nRaysAll *\
self.nRaysSeededI / self.nRaysSeeded
def _get_power(self):
self.power = self.intensity / self.nRaysAll
    def plot_plots(self):
        """
        Does all graphics update.
        """
        # the 1D histograms return (center, fwhm) for the text panels below
        self.cx, self.dx = self.plot_hist1d('x')
        self.cy, self.dy = self.plot_hist1d('y')
        if self.ePos != 0:
            self.cE, self.dE = self.plot_hist1d('c')
            self.plot_colorbar()
            if self.caxis.fwhmFormatStr is not None:
                self.textFWHM(self.caxis, self.textDE, self.cE, self.dE/2)
        self.plot_hist2d()
        if self.textNrays:
            self.textNrays.set_text(r'$N_{\rm all} = $%s' % self.nRaysAll)
        if self.textGoodrays:
            if (runner.runCardVals.backend == 'shadow'):
                strDict = {0: r'lost', 1: r'good'}
                self.textGoodrays.set_text(
                    ''.join([r'$N_{\rm ', strDict[self.rayFlag[0]],
                             r'} = $%s']) % self.nRaysNeeded)
        if self.textI:
            # resolve the flux format; 'auto' chooses '%g' or the custom
            # power-of-ten representation '%.2p' depending on what is shown
            if self.fluxFormatStr == 'auto':
                cond = (self.fluxUnit is None) or \
                    self.fluxKind.startswith('power')
                if (runner.runCardVals.backend == 'raycing'):
                    cond = cond or (self.nRaysSeeded == 0)
                if cond:
                    fluxFormatStr = '%g'
                else:
                    fluxFormatStr = '%.2p'
            else:
                fluxFormatStr = self.fluxFormatStr
            isPowerOfTen = False
            if fluxFormatStr.endswith('p'):
                # the 'p' suffix requests scientific notation with the given
                # number of decimals, rendered by self._pow10
                pos = fluxFormatStr.find('.')
                if 0 < pos+1 < len(fluxFormatStr):
                    isPowerOfTen = True
                    powerOfTenDecN = int(fluxFormatStr[pos+1])
        if (runner.runCardVals.backend == 'raycing'):
            # per-category ray counters, colored by their hue on the color
            # axis when categories are displayed
            for iTextPanel, iEnergy, iN, substr in zip(
                [self.textGood, self.textOut, self.textOver, self.textAlive,
                 self.textDead],
                [raycing.hueGood, raycing.hueOut, raycing.hueOver, 0,
                 raycing.hueDead],
                [self.nRaysGood, self.nRaysOut, self.nRaysOver,
                 self.nRaysAlive, self.nRaysDead],
                    ['good', 'out', 'over', 'alive', 'dead']):
                if iTextPanel is not None:
                    iTextPanel.set_text(''.join(
                        [r'$N_{\rm ', substr, r'} = $%s']) % iN)
                    if self.caxis.useCategory:
                        eMin, eMax = self.caxis.limits
                        if iEnergy == 0:
                            color = 'black'
                        else:
                            hue = (iEnergy-eMin) / (eMax-eMin) * colorFactor
                            # hue = iEnergy / 10.0 * colorFactor
                            color = np.dstack((hue, 1, 1))
                            color = \
                                mpl.colors.hsv_to_rgb(color)[0, :].reshape(3, )
                        iTextPanel.set_color(color)
            if self.textI:
                if (self.fluxUnit is None) or (self.nRaysSeeded == 0):
                    # no absolute flux available: show plain intensity
                    intensityStr = r'$\Phi = $'
                    if isPowerOfTen:
                        intensityStr += self._pow10(
                            self.intensity, powerOfTenDecN)
                    else:
                        intensityStr += fluxFormatStr % self.intensity
                    self.textI.set_text(intensityStr)
                else:
                    if self.fluxKind.startswith('power'):
                        if self.nRaysAll > 0:
                            self._get_power()
                            if self.displayAsAbsorbedPower:
                                powerStr2 = r'P$_{\rm abs} = $'
                            else:
                                powerStr2 = r'P$_{\rm tot} = $'
                            powerStr = powerStr2 + fluxFormatStr + ' W'
                            self.textI.set_text(powerStr % self.power)
                    else:
                        if (self.nRaysAll > 0) and (self.nRaysSeeded > 0):
                            self._get_flux()
                            if isPowerOfTen:
                                intensityStr = self._pow10(
                                    self.flux, powerOfTenDecN)
                            else:
                                intensityStr = fluxFormatStr % self.flux
                            intensityStr = \
                                r'$\Phi = ${0} ph/s'.format(intensityStr)
                            self.textI.set_text(intensityStr)
            self.update_user_elements()
        if (runner.runCardVals.backend == 'shadow'):
            if self.textI:
                intensityStr = r'$I = $'
                if isPowerOfTen:
                    intensityStr += self._pow10(
                        self.intensity, powerOfTenDecN)
                else:
                    intensityStr += fluxFormatStr % self.intensity
                self.textI.set_text(intensityStr)
        if self.xaxis.fwhmFormatStr is not None:
            self.textFWHM(self.xaxis, self.textDx, self.cx, self.dx/2)
        if self.yaxis.fwhmFormatStr is not None:
            self.textFWHM(self.yaxis, self.textDy, self.cy, self.dy/2)
        self.fig.canvas.draw()
    def save(self, suffix=''):
        """
        Saves matplotlib figures with the *suffix* appended to the file name(s)
        in front of the extension.
        """
        if self.saveName is None:
            return
        # NOTE(review): *basestring* is a Python-2 name; presumably aliased
        # for Python 3 elsewhere in this module.
        if isinstance(self.saveName, basestring):
            fileList = [self.saveName, ]
        else:  # fileList is a sequence
            fileList = self.saveName
        for aName in fileList:
            (fileBaseName, fileExtension) = os.path.splitext(aName)
            saveName = ''.join([fileBaseName, suffix, fileExtension])
            self.fig.savefig(saveName, dpi=self.dpi)
        # otherwise mpl qt backend wants to change it (only in Windows):
        self.fig.set_size_inches(self.local_size_inches)
        self.fig.canvas.draw()
    def clean_plots(self):
        """
        Cleans the graph in order to prepare it for the next ray tracing.
        """
        runner.runCardVals.iteration = 0
        runner.runCardVals.stop_event.clear()
        runner.runCardVals.finished_event.clear()
        # zero out the accumulation arrays in place (keeps array identity)
        for axis in [self.xaxis, self.yaxis, self.caxis]:
            axis.total1D[:] = np.zeros(axis.bins)
            axis.total1D_RGB[:] = np.zeros((axis.bins, 3))
        self.total2D[:] = np.zeros((self.yaxis.bins, self.xaxis.bins))
        self.total2D_RGB[:] = np.zeros((self.yaxis.bins, self.xaxis.bins, 3))
        if self.is4D:
            if self.fluxKind.startswith('E'):
                dtype = np.complex128
            else:
                dtype = np.float64
            self.total4D[:] = np.zeros((self.size2D, self.size2D), dtype=dtype)
        elif self.isPCA:
            self.total4D = []
        try:
            self.fig.canvas.window().setWindowTitle(self.title)
        except AttributeError:
            # not a Qt canvas
            pass
        # reset the ray counters
        self.nRaysAll = np.long(0)
        self.nRaysAllRestored = np.long(-1)
        self.nRaysAccepted = np.long(0)
        self.nRaysAcceptedE = 0.
        self.nRaysSeeded = np.long(0)
        self.nRaysSeededI = 0.
        self.intensity = 0.
        self.cidp = self.fig.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.fig.canvas.draw()
        # clear the text panels
        if self.ePos != 0:
            if self.caxis.fwhmFormatStr is not None:
                self.textDE.set_text('')
        self.textNrays.set_text('')
        if self.backend == 'shadow':
            self.nRaysNeeded = np.long(0)
            if self.textGoodrays is not None:
                self.textGoodrays.set_text('')
        if self.backend == 'raycing':
            self.nRaysAlive = np.long(0)
            self.nRaysGood = np.long(0)
            self.nRaysOut = np.long(0)
            self.nRaysOver = np.long(0)
            self.nRaysDead = np.long(0)
            if self.textGood is not None:
                self.textGood.set_text('')
            if self.textOut is not None:
                self.textOut.set_text('')
            if self.textOver is not None:
                self.textOver.set_text('')
            if self.textAlive is not None:
                self.textAlive.set_text('')
            if self.textDead is not None:
                self.textDead.set_text('')
        if self.textI:
            self.textI.set_text('')
        if self.xaxis.fwhmFormatStr is not None:
            self.textDx.set_text('')
        if self.yaxis.fwhmFormatStr is not None:
            self.textDy.set_text('')
        self.clean_user_elements()
        if self.contours2D is not None:
            self.contours2D.collections = []
            self.ax2dHist.collections = []
        self.plot_plots()
def set_negative(self):
"""
Utility function. Makes all plots in the graph negative (in color).
"""
self.negative = not self.negative
if self.negative:
facecolor = 'w' # previously - axisbg (depreceted)
else:
facecolor = 'k'
axesList = [self.ax2dHist, self.ax1dHistX, self.ax1dHistY]
if self.ePos != 0:
axesList.append(self.ax1dHistE)
axesList.append(self.ax1dHistEbar)
for axes in axesList:
axes.set_axis_bgcolor(facecolor)
self.plot_plots()
def set_invert_colors(self):
"""
Utility function. Inverts the color map.
"""
self.invertColorMap = not self.invertColorMap # this variable is used
# at the time of handling the ray-tracing arrays, as it is cheaper
# there but needs an additional inversion at the time of plotting if
# requested by user.
self.utilityInvertColorMap = not self.utilityInvertColorMap # this
# variable is used at the time of plotting
self.plot_plots()
    def card_copy(self):
        """
        Returns a minimum set of properties (a "card") describing the plot.
        Used for passing it to a new process or thread.

        Returns:
            PlotCard2Pickle: a pickleable snapshot of this plot's settings.
        """
        return PlotCard2Pickle(self)
    def store_plots(self):
        """
        Pickles the accumulated arrays (histograms) and values (like flux) into
        the binary file *persistentName*.
        """
        saved = SaveResults(self)
        if runner.runCardVals.globalNorm:
            # keep the snapshot for the global normalization pass
            runner.runCardVals.savedResults.append(saved)
        # save only if new rays have been accumulated since the last restore
        if self.persistentName and (self.nRaysAll > self.nRaysAllRestored):
            if raycing.is_sequence(self.persistentName):
                pn = self.persistentName[0]
            else:
                pn = self.persistentName
            if pn.endswith('mat'):
                # Matlab format
                import scipy.io as io
                # if os.path.isfile(self.persistentName):
                #     os.remove(self.persistentName)
                io.savemat(pn, vars(saved))
            else:
                f = open(pn, 'wb')
                pickle.dump(saved, f, protocol=2)
                f.close()
    def restore_plots(self):
        """
        Restores itself from a file, if possible.
        """
        try:
            if self.persistentName:
                if raycing.is_sequence(self.persistentName):
                    pns = self.persistentName
                else:
                    pns = self.persistentName,
                for pn in pns:
                    if pn.endswith('mat'):
                        # Matlab format
                        import scipy.io as io
                        saved_dic = {}
                        io.loadmat(pn, saved_dic)
                        saved = SaveResults(self)
                        saved.__dict__.update(saved_dic)
                    else:
                        pickleFile = open(pn, 'rb')
                        saved = pickle.load(pickleFile)
                        pickleFile.close()
                    saved.restore(self)
            if True:  # _DEBUG:
                print('persistentName=', self.persistentName)
                print('saved nRaysAll=', self.nRaysAll)
        except (IOError, TypeError):
            # best effort: a missing or incompatible file is silently ignored
            pass
class XYCPlotWithNumerOfReflections(XYCPlot):
    """An :class:`XYCPlot` that annotates each populated bin of the color
    (energy) histogram with its share of the total intensity; presumably the
    color axis encodes the number of reflections, as the name suggests."""

    def update_user_elements(self):
        # Redraws the per-bin percentage labels on the energy histogram.
        if not hasattr(self, 'ax1dHistE'):
            return
        if not hasattr(self, 'textUser'):
            self.textUser = []
        else:
            # drop our previously added annotations, keep foreign texts
            self.ax1dHistE.texts[:] = [t for t in self.ax1dHistE.texts
                                       if t not in self.textUser]
            del self.textUser[:]
        bins = self.caxis.total1D.nonzero()[0]
        self.ax1dHistE.yaxis.set_major_locator(MaxNLocator(integer=True))
        yPrev = -1e3
        fontSize = 8
        for i, b in enumerate(bins):
            binVal = int(round(abs(
                self.caxis.binEdges[b]+self.caxis.binEdges[b+1]) / 2))
            textOut = ' n({0:.0f})={1:.1%}'.format(
                binVal, self.caxis.total1D[b] / self.intensity)
            y = self.caxis.binEdges[b+1] if i < (len(bins)-1) else\
                self.caxis.binEdges[b]
            tr = self.ax1dHistE.transData.transform
            # skip a label that would overlap the previous one (pixel space)
            if abs(tr((0, y))[1] - tr((0, yPrev))[1]) < fontSize:
                continue
            yPrev = y
            color = self.caxis.total1D_RGB[b] / max(self.caxis.total1D_RGB[b])
            # va = 'bottom' if binVal < self.caxis.limits[1] else 'top'
            va = 'bottom' if i < (len(bins) - 1) else 'top'
            myText = self.ax1dHistE.text(
                0, y, textOut, ha='left', va=va, size=fontSize, color=color)
            self.textUser.append(myText)

    def clean_user_elements(self):
        # Removes only our own annotations from the energy histogram.
        if hasattr(self, 'textUser'):
            self.ax1dHistE.texts[:] = [t for t in self.ax1dHistE.texts
                                       if t not in self.textUser]
            del self.textUser[:]
class PlotCard2Pickle(object):
    """
    Container for a minimum set of properties (a "card") describing the plot.
    Used for passing it to a new process or thread. Must be pickleable.
    """
    def __init__(self, plot):
        # attributes mirrored one-to-one from the plot
        for name in ('xaxis', 'yaxis', 'caxis', 'aspect', 'beam',
                     'beamState', 'beamC', 'rayFlag', 'invertColorMap',
                     'ePos'):
            setattr(self, name, getattr(plot, name))
        # module-level color settings captured at card-creation time
        self.colorFactor = colorFactor
        self.colorSaturation = colorSaturation
        self.fluxKind = plot.fluxKind
        self.title = plot.title
class SaveResults(object):
    """
    Container for the accumulated arrays (histograms) and values (like flux)
    for subsequent pickling/unpickling or for global flux normalization.
    """
    def __init__(self, plot):
        """
        Stores the arrays and values and finds the global histogram maxima.
        """
        # snapshot of the accumulation arrays
        self.xtotal1D = copy.copy(plot.xaxis.total1D)
        self.xtotal1D_RGB = copy.copy(plot.xaxis.total1D_RGB)
        self.ytotal1D = copy.copy(plot.yaxis.total1D)
        self.ytotal1D_RGB = copy.copy(plot.yaxis.total1D_RGB)
        self.etotal1D = copy.copy(plot.caxis.total1D)
        self.etotal1D_RGB = copy.copy(plot.caxis.total1D_RGB)
        self.total2D = copy.copy(plot.total2D)
        self.total2D_RGB = copy.copy(plot.total2D_RGB)
        axes = [plot.xaxis, plot.yaxis]
        if plot.ePos:
            axes.append(plot.caxis)
            self.cE, self.dE = copy.copy(plot.cE), copy.copy(plot.dE)
        self.cx, self.dx = copy.copy(plot.cx), copy.copy(plot.dx)
        self.cy, self.dy = copy.copy(plot.cy), copy.copy(plot.dy)
        # update the global maxima on the *plot* (side effect used by the
        # global normalization pass)
        for axis in axes:
            if axis.globalMax1D < axis.max1D:
                axis.globalMax1D = axis.max1D
            if axis.globalMax1D_RGB < axis.max1D_RGB:
                axis.globalMax1D_RGB = axis.max1D_RGB
        if plot.globalMax2D_RGB < plot.max2D_RGB:
            plot.globalMax2D_RGB = plot.max2D_RGB
        self.nRaysAll = copy.copy(plot.nRaysAll)
        self.intensity = copy.copy(plot.intensity)
        # backend-specific ray counters
        if plot.backend == 'shadow':
            self.nRaysNeeded = copy.copy(plot.nRaysNeeded)
        elif plot.backend == 'raycing':
            self.nRaysAlive = copy.copy(plot.nRaysAlive)
            self.nRaysGood = copy.copy(plot.nRaysGood)
            self.nRaysOut = copy.copy(plot.nRaysOut)
            self.nRaysOver = copy.copy(plot.nRaysOver)
            self.nRaysDead = copy.copy(plot.nRaysDead)
            if (plot.nRaysSeeded > 0):
                self.nRaysAccepted = copy.copy(plot.nRaysAccepted)
                self.nRaysAcceptedE = copy.copy(plot.nRaysAcceptedE)
                self.nRaysSeeded = copy.copy(plot.nRaysSeeded)
                self.nRaysSeededI = copy.copy(plot.nRaysSeededI)
            self.flux = copy.copy(plot.flux)
            self.power = copy.copy(plot.power)
        self.xlimits = copy.copy(plot.xaxis.limits)
        self.ylimits = copy.copy(plot.yaxis.limits)
        self.elimits = copy.copy(plot.caxis.limits)
        self.xbinEdges = copy.copy(plot.xaxis.binEdges)
        self.ybinEdges = copy.copy(plot.yaxis.binEdges)
        self.ebinEdges = copy.copy(plot.caxis.binEdges)
        self.fluxKind = copy.copy(plot.fluxKind)

    def restore(self, plot):
        """
        Restores the arrays and values after unpickling or after running the
        ray-tracing series and finding the global histogram maxima.
        """
        # squeeze is needed even for floats,
        # otherwise for matlab it is returned as [[value]]
        # note: the histograms and counters are ADDED so that several stored
        # results can be accumulated into one plot
        plot.xaxis.total1D += np.squeeze(self.xtotal1D)
        plot.xaxis.total1D_RGB += np.squeeze(self.xtotal1D_RGB)
        plot.yaxis.total1D += np.squeeze(self.ytotal1D)
        plot.yaxis.total1D_RGB += np.squeeze(self.ytotal1D_RGB)
        plot.caxis.total1D += np.squeeze(self.etotal1D)
        plot.caxis.total1D_RGB += np.squeeze(self.etotal1D_RGB)
        plot.total2D += np.squeeze(self.total2D)
        plot.total2D_RGB += np.squeeze(self.total2D_RGB)
        plot.nRaysAll += np.squeeze(self.nRaysAll)
        plot.nRaysAllRestored += np.squeeze(self.nRaysAll)
        plot.intensity += np.squeeze(self.intensity)
        if plot.backend == 'shadow':
            plot.nRaysNeeded += np.squeeze(self.nRaysNeeded)
        elif plot.backend == 'raycing':
            plot.nRaysAlive += np.squeeze(self.nRaysAlive)
            plot.nRaysGood += np.squeeze(self.nRaysGood)
            plot.nRaysOut += np.squeeze(self.nRaysOut)
            plot.nRaysOver += np.squeeze(self.nRaysOver)
            plot.nRaysDead += np.squeeze(self.nRaysDead)
            # older stored files may lack the seeded-ray counters
            if hasattr(self, 'nRaysSeeded'):
                if self.nRaysSeeded > 0:
                    plot.nRaysAccepted += np.squeeze(self.nRaysAccepted)
                    plot.nRaysAcceptedE += np.squeeze(self.nRaysAcceptedE)
                    plot.nRaysSeeded += np.squeeze(self.nRaysSeeded)
                    plot.nRaysSeededI += np.squeeze(self.nRaysSeededI)
        # limits and bin edges are REPLACED, not accumulated
        plot.xaxis.limits = np.copy(np.squeeze(self.xlimits))
        plot.yaxis.limits = np.copy(np.squeeze(self.ylimits))
        plot.caxis.limits = np.copy(np.squeeze(self.elimits))
        plot.xaxis.binEdges = np.copy(np.squeeze(self.xbinEdges))
        plot.yaxis.binEdges = np.copy(np.squeeze(self.ybinEdges))
        plot.caxis.binEdges = np.copy(np.squeeze(self.ebinEdges))
        plot.fluxKind = np.array_str(np.copy(np.squeeze(self.fluxKind)))

    # def __getstate__(self):
    #     odict = self.__dict__.copy()  # copy the dict since we change it
    #     del odict['plot']  # remove plot reference, it cannot be pickled
    #     return odict
| mit |
hlin117/scikit-learn | sklearn/linear_model/omp.py | 7 | 31863 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
# extra keyword arguments for scipy.linalg.solve_triangular, filled in
# depending on the installed scipy version
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    # check_finite=False is an optimization available only in scipy >=0.12
    solve_triangular_args = {'check_finite': False}

# warning text shared by the OMP implementations below; emitted when the
# selected atoms become linearly dependent before reaching the target
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : array, shape (n_samples,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))

    alpha = np.dot(X.T, y)  # X^T y; gets permuted along with the columns of X
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=X.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=X.dtype)

    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # greedy step: pick the atom most correlated with the residual
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break

        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # move the chosen atom into the active set by swapping its column,
        # its correlation and its original index to position n_active
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)

        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix

    Xy : array, shape (n_features,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol_0 : float
        Squared norm of y, required if tol is not None.

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    # Fortran order makes the row/column BLAS swaps below cheap views.
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)

    if copy_Xy:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy  # current correlations X'(y - X gamma); starts at X'y
    tol_curr = tol_0  # running residual energy (only used when tol is set)
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs

    # NOTE: `solve_triangular_args` and `premature` are module-level globals
    # defined earlier in this file (scipy-version compat / warning message).
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=Gram.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=Gram.dtype)

    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: pick the atom most correlated with the residual.
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            # Rank-one update of the Cholesky factor of the active Gram block.
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap the chosen atom into the leading "active" block (rows AND
        # columns of Gram, plus the bookkeeping arrays) so the active set
        # is always the contiguous prefix.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incrementally track the residual energy without forming the
            # residual itself (Gram-only update).
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False,
                  return_n_iter=False):
    """Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y : array, shape (n_samples,) or (n_samples, n_targets)
        Input targets

    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute : {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    X = check_array(X, order='F', copy=copy_X)
    copy_X = False
    # Bug fix: validate y *before* touching ``.ndim``/``.reshape`` so that
    # plain Python lists are accepted (the old order raised AttributeError).
    # ``ensure_2d=False`` keeps 1-d targets legal; we promote them ourselves.
    y = check_array(y, ensure_2d=False)
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        # Bug fix: forward ``return_n_iter`` so the precomputed (Gram) code
        # path honors the same output contract as the dense path below.
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False,
                                  return_path=return_path,
                                  return_n_iter=return_n_iter)

    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []

    # One independent OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol,
            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            # Trim the path to the number of atoms actually selected.
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if y.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False,
                       return_n_iter=False):
    """Gram Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X

    Xy : array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y

    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    Gram = check_array(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]

    if n_nonzero_coefs is None and tol is None:
        # Bug fix: keep the same "10% of n_features but at least one" default
        # as `orthogonal_mp`. The old `int(0.1 * len(Gram))` evaluated to 0
        # for fewer than 10 features, which then raised
        # "The number of atoms must be positive" below.
        n_nonzero_coefs = max(int(0.1 * len(Gram)), 1)
    if tol is not None and norms_squared is None:
        raise ValueError('Gram OMP needs the precomputed norms in order '
                         'to evaluate the error sum of squares.')
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")

    if return_path:
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]))

    n_iters = []
    # One independent OMP problem per target column.
    for k in range(Xy.shape[1]):
        out = _gram_omp(
            Gram, Xy[:, k], n_nonzero_coefs,
            norms_squared[k] if tol is not None else None, tol,
            copy_Gram=copy_Gram, copy_Xy=copy_Xy,
            return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            # Trim the path to the number of atoms actually selected.
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if Xy.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    """Orthogonal Matching Pursuit model (OMP).

    Greedy sparse linear regression: atoms (features) are added one at a
    time until either ``n_nonzero_coefs`` are active or the residual drops
    below ``tol``.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, optional
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    fit_intercept : boolean, optional
        Whether to calculate the intercept for this model. If set to False,
        no intercept will be used in calculations (e.g. data is expected to
        be already centered).

    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm. If you wish to
        standardize, please use :class:`sklearn.preprocessing.StandardScaler`
        before calling ``fit`` on an estimator with ``normalize=False``.

    precompute : {True, False, 'auto'}, default 'auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when `n_targets` or `n_samples`
        is very large. Note that if you already have such matrices, you can
        pass them directly to the fit method.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formula).

    intercept_ : float or array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : int or array-like
        Number of active features across every target.

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    decomposition.sparse_encode
    """

    def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto'):
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        n_features = X.shape[1]

        # Center/normalize and possibly precompute Gram/Xy, according to
        # the estimator's settings.
        X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
            X, y, None, self.precompute, self.normalize,
            self.fit_intercept, copy=True)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        if self.n_nonzero_coefs is None and self.tol is None:
            # Default: 10% of n_features, but at least one atom.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs

        if Gram is not False:
            # Gram path: work on X.T X / X.T y produced by _pre_fit.
            squared_norms = (np.sum(y ** 2, axis=0)
                             if self.tol is not None else None)
            coef_, self.n_iter_ = orthogonal_mp_gram(
                Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol, norms_squared=squared_norms,
                copy_Gram=True, copy_Xy=True,
                return_n_iter=True)
        else:
            # Dense path: run OMP directly on the (preprocessed) data.
            coef_, self.n_iter_ = orthogonal_mp(
                X, y, self.n_nonzero_coefs_, self.tol,
                precompute=False, copy_X=True,
                return_n_iter=True)

        self.coef_ = coef_.T
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full OMP forward path.

    Parameters
    ----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the model on.

    y_train : array, shape (n_samples)
        The target variable to fit on.

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on.

    y_test : array, shape (n_samples)
        The target variable to compute the residues on.

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to False,
        no intercept will be used (data is expected to be already centered).

    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.

    Returns
    -------
    residues : array, shape (n_samples, max_features)
        Residues of the prediction on the test data.
    """
    if copy:
        X_train, y_train = X_train.copy(), y_train.copy()
        X_test, y_test = X_test.copy(), y_test.copy()

    if fit_intercept:
        # Center both splits using statistics from the training split only.
        col_means = X_train.mean(axis=0)
        X_train -= col_means
        X_test -= col_means
        target_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= target_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= target_mean

    if normalize:
        # Scale each nonzero column of the training data to unit l2-norm.
        col_norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzero_cols = np.flatnonzero(col_norms)
        X_train[:, nonzero_cols] /= col_norms[nonzero_cols]

    # Full forward path: column k of ``path`` holds the coefficients with
    # k + 1 active atoms.
    path = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                         precompute=False, copy_X=False,
                         return_path=True)
    if path.ndim == 1:
        path = path[:, np.newaxis]

    if normalize:
        # Undo the column scaling so coefficients apply to raw X_test.
        path[nonzero_cols] /= col_norms[nonzero_cols][:, np.newaxis]

    return np.dot(path.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Matching Pursuit model (OMP)

    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    verbose : boolean or integer, optional
        Sets the verbosity amount

    Read more in the :ref:`User Guide <omp>`.

    Attributes
    ----------
    intercept_ : float or array, shape (n_targets,)
        Independent term in decision function.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).

    n_nonzero_coefs_ : int
        Estimated number of non-zero coefficients giving the best mean squared
        error over the cross-validation folds.

    n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters got by cross-validating across all folds.

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.

        y : array-like, shape [n_samples]
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
                         estimator=self)
        X = as_float_array(X, copy=False, force_all_finite=False)
        cv = check_cv(self.cv, classifier=False)
        # Default cap: 10% of n_features, at least 5, never more than
        # n_features.  Note: ``not self.max_iter`` also treats an explicit
        # max_iter=0 as "use the default".
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # One residue matrix per fold, shape (n_steps, n_test_samples);
        # folds may stop early so n_steps can differ across folds.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv.split(X))

        # Truncate every fold to the shortest path so MSEs are comparable.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        # Path index k corresponds to k + 1 active atoms, hence the +1.
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full data with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        self.n_iter_ = omp.n_iter_
        return self
| bsd-3-clause |
arokem/seaborn | seaborn/relational.py | 2 | 37384 | import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import (
VectorPlotter,
)
from .utils import (
locator_to_legend_entries,
adjust_legend_subtitles,
_default_color,
_deprecate_ci,
)
from ._statistics import EstimateAggregator
from .axisgrid import FacetGrid, _facet_docs
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["relplot", "scatterplot", "lineplot"]
_relational_narrative = DocstringComponents(dict(
# --- Introductory prose
main_api="""
The relationship between ``x`` and ``y`` can be shown for different subsets
of the data using the ``hue``, ``size``, and ``style`` parameters. These
parameters control what visual semantics are used to identify the different
subsets. It is possible to show up to three dimensions independently by
using all three semantic types, but this style of plot can be hard to
interpret and is often ineffective. Using redundant semantics (i.e. both
``hue`` and ``style`` for the same variable) can be helpful for making
graphics more accessible.
See the :ref:`tutorial <relational_tutorial>` for more information.
""",
relational_semantic="""
The default treatment of the ``hue`` (and to a lesser extent, ``size``)
semantic, if present, depends on whether the variable is inferred to
represent "numeric" or "categorical" data. In particular, numeric variables
are represented with a sequential colormap by default, and the legend
entries show regular "ticks" with values that may or may not exist in the
data. This behavior can be controlled through various parameters, as
described and illustrated below.
""",
))
_relational_docs = dict(
# --- Shared function parameters
data_vars="""
x, y : names of variables in ``data`` or vector data
Input data variables; must be numeric. Can pass data directly or
reference columns in ``data``.
""",
data="""
data : DataFrame, array, or list of arrays
Input data structure. If ``x`` and ``y`` are specified as names, this
should be a "long-form" DataFrame containing those columns. Otherwise
it is treated as "wide-form" data and grouping variables are ignored.
See the examples for the various ways this parameter can be specified
and the different effects of each.
""",
palette="""
palette : string, list, dict, or matplotlib colormap
An object that determines how colors are chosen when ``hue`` is used.
It can be the name of a seaborn palette or matplotlib colormap, a list
of colors (anything matplotlib understands), a dict mapping levels
of the ``hue`` variable to colors, or a matplotlib colormap object.
""",
hue_order="""
hue_order : list
Specified order for the appearance of the ``hue`` variable levels,
otherwise they are determined from the data. Not relevant when the
``hue`` variable is numeric.
""",
hue_norm="""
hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
Normalization in data units for colormap applied to the ``hue``
variable when it is numeric. Not relevant if it is categorical.
""",
sizes="""
sizes : list, dict, or tuple
An object that determines how sizes are chosen when ``size`` is used.
It can always be a list of size values or a dict mapping levels of the
``size`` variable to sizes. When ``size`` is numeric, it can also be
a tuple specifying the minimum and maximum size to use such that other
values are normalized within this range.
""",
size_order="""
size_order : list
Specified order for appearance of the ``size`` variable levels,
otherwise they are determined from the data. Not relevant when the
``size`` variable is numeric.
""",
size_norm="""
size_norm : tuple or Normalize object
Normalization in data units for scaling plot objects when the
``size`` variable is numeric.
""",
dashes="""
dashes : boolean, list, or dictionary
Object determining how to draw the lines for different levels of the
``style`` variable. Setting to ``True`` will use default dash codes, or
you can pass a list of dash codes or a dictionary mapping levels of the
``style`` variable to dash codes. Setting to ``False`` will use solid
lines for all subsets. Dashes are specified as in matplotlib: a tuple
of ``(segment, gap)`` lengths, or an empty string to draw a solid line.
""",
markers="""
markers : boolean, list, or dictionary
Object determining how to draw the markers for different levels of the
``style`` variable. Setting to ``True`` will use default markers, or
you can pass a list of markers or a dictionary mapping levels of the
``style`` variable to markers. Setting to ``False`` will draw
marker-less lines. Markers are specified as in matplotlib.
""",
style_order="""
style_order : list
Specified order for appearance of the ``style`` variable levels
otherwise they are determined from the data. Not relevant when the
``style`` variable is numeric.
""",
units="""
units : vector or key in ``data``
Grouping variable identifying sampling units. When used, a separate
line will be drawn for each unit with appropriate semantics, but no
legend entry will be added. Useful for showing distribution of
experimental replicates when exact identities are not needed.
""",
estimator="""
estimator : name of pandas method or callable or None
Method for aggregating across multiple observations of the ``y``
variable at the same ``x`` level. If ``None``, all observations will
be drawn.
""",
ci="""
ci : int or "sd" or None
Size of the confidence interval to draw when aggregating.
.. deprecated:: 0.12.0
Use the new `errorbar` parameter for more flexibility.
""",
n_boot="""
n_boot : int
Number of bootstraps to use for computing the confidence interval.
""",
seed="""
seed : int, numpy.random.Generator, or numpy.random.RandomState
Seed or random number generator for reproducible bootstrapping.
""",
legend="""
legend : "auto", "brief", "full", or False
How to draw the legend. If "brief", numeric ``hue`` and ``size``
variables will be represented with a sample of evenly spaced values.
If "full", every group will get an entry in the legend. If "auto",
choose between brief or full representation based on number of levels.
If ``False``, no legend data is added and no legend is drawn.
""",
ax_in="""
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
""",
ax_out="""
ax : matplotlib Axes
Returns the Axes object with the plot drawn onto it.
""",
)
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
facets=DocstringComponents(_facet_docs),
rel=DocstringComponents(_relational_docs),
stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),
)
class _RelationalPlotter(VectorPlotter):
    # Base plotter for relational plots: maps wide-form data onto the
    # x/y/hue/style roles and builds legend entries from the semantic maps.

    wide_structure = {
        "x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
    }

    # TODO where best to define default parameters?
    sort = True

    def add_legend_data(self, ax):
        """Add labeled artists to represent the different plot semantics."""
        verbosity = self.legend
        if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
            err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
            raise ValueError(err)
        elif verbosity is True:
            verbosity = "auto"

        legend_kwargs = {}
        keys = []

        # Assign a legend title if there is only going to be one sub-legend,
        # otherwise, subtitles will be inserted into the texts list with an
        # invisible handle (which is a hack)
        titles = {
            title for title in
            (self.variables.get(v, None) for v in ["hue", "size", "style"])
            if title is not None
        }
        if len(titles) == 1:
            legend_title = titles.pop()
        else:
            legend_title = ""

        # Properties for the invisible "subtitle" artists mentioned above.
        title_kws = dict(
            visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
        )

        def update(var_name, val_name, **kws):
            # Accumulate artist kwargs per (variable, value) key, preserving
            # first-seen order in ``keys`` so entries group by semantic.

            key = var_name, val_name
            if key in legend_kwargs:
                legend_kwargs[key].update(**kws)
            else:
                keys.append(key)
                legend_kwargs[key] = dict(**kws)

        # Define the maximum number of ticks to use for "brief" legends
        brief_ticks = 6

        # -- Add a legend for hue semantics
        brief_hue = self._hue_map.map_type == "numeric" and (
            verbosity == "brief"
            or (verbosity == "auto" and len(self._hue_map.levels) > brief_ticks)
        )
        if brief_hue:
            # Sample evenly spaced tick values instead of listing every level.
            if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
                locator = mpl.ticker.LogLocator(numticks=brief_ticks)
            else:
                locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
            limits = min(self._hue_map.levels), max(self._hue_map.levels)
            hue_levels, hue_formatted_levels = locator_to_legend_entries(
                locator, limits, self.plot_data["hue"].infer_objects().dtype
            )
        elif self._hue_map.levels is None:
            hue_levels = hue_formatted_levels = []
        else:
            hue_levels = hue_formatted_levels = self._hue_map.levels

        # Add the hue semantic subtitle
        if not legend_title and self.variables.get("hue", None) is not None:
            update((self.variables["hue"], "title"),
                   self.variables["hue"], **title_kws)

        # Add the hue semantic labels
        for level, formatted_level in zip(hue_levels, hue_formatted_levels):
            if level is not None:
                color = self._hue_map(level)
                update(self.variables["hue"], formatted_level, color=color)

        # -- Add a legend for size semantics
        brief_size = self._size_map.map_type == "numeric" and (
            verbosity == "brief"
            or (verbosity == "auto" and len(self._size_map.levels) > brief_ticks)
        )
        if brief_size:
            # Define how ticks will interpolate between the min/max data values
            if isinstance(self._size_map.norm, mpl.colors.LogNorm):
                locator = mpl.ticker.LogLocator(numticks=brief_ticks)
            else:
                locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
            # Define the min/max data values
            limits = min(self._size_map.levels), max(self._size_map.levels)
            size_levels, size_formatted_levels = locator_to_legend_entries(
                locator, limits, self.plot_data["size"].infer_objects().dtype
            )
        elif self._size_map.levels is None:
            size_levels = size_formatted_levels = []
        else:
            size_levels = size_formatted_levels = self._size_map.levels

        # Add the size semantic subtitle
        if not legend_title and self.variables.get("size", None) is not None:
            update((self.variables["size"], "title"),
                   self.variables["size"], **title_kws)

        # Add the size semantic labels
        for level, formatted_level in zip(size_levels, size_formatted_levels):
            if level is not None:
                size = self._size_map(level)
                # ``linewidth`` applies to line artists, ``s`` to scatter.
                update(
                    self.variables["size"],
                    formatted_level,
                    linewidth=size,
                    s=size,
                )

        # -- Add a legend for style semantics

        # Add the style semantic title
        if not legend_title and self.variables.get("style", None) is not None:
            update((self.variables["style"], "title"),
                   self.variables["style"], **title_kws)

        # Add the style semantic labels
        if self._style_map.levels is not None:
            for level in self._style_map.levels:
                if level is not None:
                    attrs = self._style_map(level)
                    update(
                        self.variables["style"],
                        level,
                        marker=attrs.get("marker", ""),
                        dashes=attrs.get("dashes", ""),
                    )

        # Create proxy artists (empty plot/scatter calls) on the target axes,
        # keeping only the attributes this plotter's legend understands.
        func = getattr(ax, self._legend_func)

        legend_data = {}
        legend_order = []

        for key in keys:

            _, label = key
            kws = legend_kwargs[key]
            kws.setdefault("color", ".2")
            use_kws = {}
            for attr in self._legend_attributes + ["visible"]:
                if attr in kws:
                    use_kws[attr] = kws[attr]
            artist = func([], [], label=label, **use_kws)
            if self._legend_func == "plot":
                artist = artist[0]
            legend_data[key] = artist
            legend_order.append(key)

        self.legend_title = legend_title
        self.legend_data = legend_data
        self.legend_order = legend_order
class _LinePlotter(_RelationalPlotter):
    """Plotter for :func:`lineplot`; draws lines with hue/size/style semantics."""

    # Artist properties that add_legend_data may copy onto proxy legend artists.
    _legend_attributes = ["color", "linewidth", "marker", "dashes"]
    # Axes method used to create the proxy legend artists.
    _legend_func = "plot"

    def __init__(
        self, *,
        data=None, variables={},
        estimator=None, ci=None, n_boot=None, seed=None,
        sort=True, err_style=None, err_kws=None, legend=None,
        errorbar=None,
    ):
        # NOTE(review): mutable default for `variables` — it is only read and
        # forwarded here, but confirm it is never mutated downstream.

        # TODO this is messy, we want the mapping to be agnostic about
        # the kind of plot to draw, but for the time being we need to set
        # this information so the SizeMapping can use it
        self._default_size_range = (
            np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
        )

        super().__init__(data=data, variables=variables)

        self.estimator = estimator
        self.errorbar = errorbar
        self.ci = ci  # retained only for the deprecation path; errorbar supersedes it
        self.n_boot = n_boot
        self.seed = seed
        self.sort = sort
        self.err_style = err_style
        self.err_kws = {} if err_kws is None else err_kws

        self.legend = legend

    def plot(self, ax, kws):
        """Draw the plot onto an axes, passing matplotlib kwargs."""

        # Draw a test plot, using the passed in kwargs. The goal here is to
        # honor both (a) the current state of the plot cycler and (b) the
        # specified kwargs on all the lines we will draw, overriding when
        # relevant with the data semantics. Note that we won't cycle
        # internally; in other words, if ``hue`` is not used, all elements will
        # have the same color, but they will have the color that you would have
        # gotten from the corresponding matplotlib function, and calling the
        # function will advance the axes property cycle.

        # Normalize the matplotlib abbreviations (mew/mec) into the long forms.
        kws.setdefault("markeredgewidth", kws.pop("mew", .75))
        kws.setdefault("markeredgecolor", kws.pop("mec", "w"))

        # Set default error kwargs
        err_kws = self.err_kws.copy()
        if self.err_style == "band":
            err_kws.setdefault("alpha", .2)
        elif self.err_style == "bars":
            pass
        elif self.err_style is not None:
            err = "`err_style` must be 'band' or 'bars', not {}"
            raise ValueError(err.format(self.err_style))

        # Initialize the aggregation object
        agg = EstimateAggregator(
            self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
        )

        # TODO abstract variable to aggregate over here-ish. Better name?
        agg_var = "y"
        grouper = ["x"]

        # TODO How to handle NA? We don't want NA to propagate through to the
        # estimate/CI when some values are present, but we would also like
        # matplotlib to show "gaps" in the line when all values are missing.
        # This is straightforward absent aggregation, but complicated with it.
        # If we want to use nas, we need to conditionalize dropna in iter_data.

        # Loop over the semantic subsets and add to the plot
        grouping_vars = "hue", "size", "style"
        for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):

            if self.sort:
                # Sort by units first so each unit's points stay contiguous.
                sort_vars = ["units", "x", "y"]
                sort_cols = [var for var in sort_vars if var in self.variables]
                sub_data = sub_data.sort_values(sort_cols)

            if self.estimator is not None:
                if "units" in self.variables:
                    # TODO eventually relax this constraint
                    err = "estimator must be None when specifying units"
                    raise ValueError(err)
                grouped = sub_data.groupby(grouper, sort=self.sort)
                # Could pass as_index=False instead of reset_index,
                # but that fails on a corner case with older pandas.
                sub_data = grouped.apply(agg, agg_var).reset_index()

            # Invert the log transform applied to the comp data so the plotted
            # coordinates are back in data units.
            # TODO this is pretty ad hoc ; see GH2409
            for var in "xy":
                if self._log_scaled(var):
                    for col in sub_data.filter(regex=f"^{var}"):
                        sub_data[col] = np.power(10, sub_data[col])

            # --- Draw the main line(s)

            if "units" in self.variables:  # XXX why not add to grouping variables?
                lines = []
                for _, unit_data in sub_data.groupby("units"):
                    lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
            else:
                lines = ax.plot(sub_data["x"], sub_data["y"], **kws)

            # Override cycler-derived properties with the semantic mappings.
            for line in lines:

                if "hue" in sub_vars:
                    line.set_color(self._hue_map(sub_vars["hue"]))

                if "size" in sub_vars:
                    line.set_linewidth(self._size_map(sub_vars["size"]))

                if "style" in sub_vars:
                    attributes = self._style_map(sub_vars["style"])
                    if "dashes" in attributes:
                        line.set_dashes(attributes["dashes"])
                    if "marker" in attributes:
                        line.set_marker(attributes["marker"])

            # Properties of the last line drawn, reused for the error artists
            # below so they match the line's appearance.
            line_color = line.get_color()
            line_alpha = line.get_alpha()
            line_capstyle = line.get_solid_capstyle()

            # --- Draw the confidence intervals

            if self.estimator is not None and self.errorbar is not None:

                # TODO handling of orientation will need to happen here

                if self.err_style == "band":

                    ax.fill_between(
                        sub_data["x"], sub_data["ymin"], sub_data["ymax"],
                        color=line_color, **err_kws
                    )

                elif self.err_style == "bars":

                    # errorbar expects deltas relative to y, not absolute bounds.
                    error_deltas = (
                        sub_data["y"] - sub_data["ymin"],
                        sub_data["ymax"] - sub_data["y"],
                    )
                    ebars = ax.errorbar(
                        sub_data["x"], sub_data["y"], error_deltas,
                        linestyle="", color=line_color, alpha=line_alpha,
                        **err_kws
                    )

                    # Set the capstyle properly on the error bars
                    for obj in ebars.get_children():
                        if isinstance(obj, mpl.collections.LineCollection):
                            obj.set_capstyle(line_capstyle)

        # Finalize the axes details
        self._add_axis_labels(ax)
        if self.legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                legend = ax.legend(title=self.legend_title)
                adjust_legend_subtitles(legend)
class _ScatterPlotter(_RelationalPlotter):
    """Plotter for :func:`scatterplot`; draws points with hue/size/style semantics."""

    # Artist properties that add_legend_data may copy onto proxy legend artists.
    _legend_attributes = ["color", "s", "marker"]
    # Axes method used to create the proxy legend artists.
    _legend_func = "scatter"

    def __init__(
        self, *,
        data=None, variables={},
        x_bins=None, y_bins=None,
        estimator=None, ci=None, n_boot=None,
        alpha=None, x_jitter=None, y_jitter=None,
        legend=None
    ):
        # NOTE(review): x_bins/y_bins, estimator, ci, n_boot, and the jitter
        # parameters are accepted but not stored or used here — presumably
        # placeholders for planned functionality; confirm before relying on them.

        # TODO this is messy, we want the mapping to be agnostic about
        # the kind of plot to draw, but for the time being we need to set
        # this information so the SizeMapping can use it
        self._default_size_range = (
            np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
        )

        super().__init__(data=data, variables=variables)

        self.alpha = alpha
        self.legend = legend

    def plot(self, ax, kws):
        """Draw the scatter plot onto an axes, passing matplotlib kwargs."""

        # --- Determine the visual attributes of the plot

        data = self.plot_data.dropna()
        if data.empty:
            return

        # Define the vectors of x and y positions
        empty = np.full(len(data), np.nan)
        x = data.get("x", empty)
        y = data.get("y", empty)

        # Set defaults for other visual attributes
        kws.setdefault("edgecolor", "w")

        if "style" in self.variables:
            # Use a representative marker so scatter sets the edgecolor
            # properly for line art markers. We currently enforce either
            # all or none line art so this works.
            example_level = self._style_map.levels[0]
            example_marker = self._style_map(example_level, "marker")
            kws.setdefault("marker", example_marker)

        # TODO this makes it impossible to vary alpha with hue which might
        # otherwise be useful? Should we just pass None?
        kws["alpha"] = 1 if self.alpha == "auto" else self.alpha

        # Draw the scatter plot
        points = ax.scatter(x=x, y=y, **kws)

        # Apply the mapping from semantic variables to artist attributes
        if "hue" in self.variables:
            points.set_facecolors(self._hue_map(data["hue"]))

        if "size" in self.variables:
            points.set_sizes(self._size_map(data["size"]))

        if "style" in self.variables:
            p = [self._style_map(val, "path") for val in data["style"]]
            points.set_paths(p)

        # Apply dependent default attributes
        if "linewidth" not in kws:
            # Scale edge width with a low percentile of the point sizes so
            # small markers do not get swallowed by their edges.
            sizes = points.get_sizes()
            points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))

        # Finalize the axes details
        self._add_axis_labels(ax)
        if self.legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                legend = ax.legend(title=self.legend_title)
                adjust_legend_subtitles(legend)
@_deprecate_positional_args
def lineplot(
    *,
    x=None, y=None,
    hue=None, size=None, style=None,
    data=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    dashes=True, markers=None, style_order=None,
    units=None, estimator="mean", ci="deprecated", n_boot=1000, seed=None,
    sort=True, err_style="band", err_kws=None,
    legend="auto",
    errorbar=("ci", 95),
    ax=None, **kwargs
):
    # Docstring is attached below from the shared template
    # (``lineplot.__doc__ = ...``).

    # Handle deprecation of ci parameter
    errorbar = _deprecate_ci(errorbar, ci)

    # Pull the semantic variable assignments out of this call's namespace.
    variables = _LinePlotter.get_semantics(locals())
    p = _LinePlotter(
        data=data, variables=variables,
        estimator=estimator, ci=ci, n_boot=n_boot, seed=seed,
        sort=sort, err_style=err_style, err_kws=err_kws, legend=legend,
        errorbar=errorbar,
    )

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, dashes=dashes, order=style_order)

    if ax is None:
        ax = plt.gca()

    # Without a style semantic or an explicit linestyle, pass an empty dash
    # spec so matplotlib draws a solid line deterministically.
    if style is None and not {"ls", "linestyle"} & set(kwargs):  # XXX
        kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes

    if not p.has_xy_data:
        return ax

    p._attach(ax)

    # Other functions have color as an explicit param,
    # and we should probably do that here too
    color = kwargs.pop("color", kwargs.pop("c", None))
    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)

    p.plot(ax, kwargs)
    return ax
# Attach the shared, template-formatted docstring to lineplot.
# Fix: "paramters" -> "parameters" in the err_kws description.
lineplot.__doc__ = """\
Draw a line plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
By default, the plot aggregates over multiple ``y`` values at each value of
``x`` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
    Grouping variable that will produce lines with different colors.
    Can be either categorical or numeric, although color mapping will
    behave differently in latter case.
size : vector or key in ``data``
    Grouping variable that will produce lines with different widths.
    Can be either categorical or numeric, although size mapping will
    behave differently in latter case.
style : vector or key in ``data``
    Grouping variable that will produce lines with different dashes
    and/or markers. Can have a numeric dtype but will always be treated
    as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.dashes}
{params.rel.markers}
{params.rel.style_order}
{params.rel.units}
{params.rel.estimator}
{params.rel.ci}
{params.rel.n_boot}
{params.rel.seed}
sort : boolean
    If True, the data will be sorted by the x and y variables, otherwise
    lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars"
    Whether to draw the confidence intervals with translucent error bands
    or discrete error bars.
err_kws : dict of keyword arguments
    Additional parameters to control the aesthetics of the error bars. The
    kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
    or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``.
{params.rel.legend}
{params.stat.errorbar}
{params.core.ax}
kwargs : key, value mappings
    Other keyword arguments are passed down to
    :meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.scatterplot}
{seealso.pointplot}
Examples
--------
.. include:: ../docstrings/lineplot.rst
""".format(
    narrative=_relational_narrative,
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def scatterplot(
    *,
    x=None, y=None,
    hue=None, style=None, size=None, data=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=True, style_order=None,
    x_bins=None, y_bins=None,
    units=None, estimator=None, ci=95, n_boot=1000,
    alpha=None, x_jitter=None, y_jitter=None,
    legend="auto", ax=None,
    **kwargs
):
    # Docstring is attached below from the shared template
    # (``scatterplot.__doc__ = ...``).

    # Pull the semantic variable assignments out of this call's namespace.
    variables = _ScatterPlotter.get_semantics(locals())
    p = _ScatterPlotter(
        data=data, variables=variables,
        x_bins=x_bins, y_bins=y_bins,
        estimator=estimator, ci=ci, n_boot=n_boot,
        alpha=alpha, x_jitter=x_jitter, y_jitter=y_jitter, legend=legend,
    )

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, order=style_order)

    if ax is None:
        ax = plt.gca()

    if not p.has_xy_data:
        return ax

    p._attach(ax)

    # Other functions have color as an explicit param,
    # and we should probably do that here too
    color = kwargs.pop("color", None)
    kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)

    p.plot(ax, kwargs)
    return ax
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in ``data``
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in ``data``
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{{x,y}}_bins : lists or arrays or functions
*Currently non-functional.*
{params.rel.units}
*Currently non-functional.*
{params.rel.estimator}
*Currently non-functional.*
{params.rel.ci}
*Currently non-functional.*
{params.rel.n_boot}
*Currently non-functional.*
alpha : float
Proportional opacity of the points.
{{x,y}}_jitter : booleans or floats
*Currently non-functional.*
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def relplot(
    *,
    x=None, y=None,
    hue=None, size=None, style=None, data=None,
    row=None, col=None,
    col_wrap=None, row_order=None, col_order=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=None, dashes=None, style_order=None,
    legend="auto", kind="scatter",
    height=5, aspect=1, facet_kws=None,
    units=None,
    **kwargs
):
    # Docstring is attached below from the shared template
    # (``relplot.__doc__ = ...``).

    # Resolve the axes-level function and plotter class for this kind.
    if kind == "scatter":

        plotter = _ScatterPlotter
        func = scatterplot
        markers = True if markers is None else markers

    elif kind == "line":

        plotter = _LinePlotter
        func = lineplot
        dashes = True if dashes is None else dashes

    else:
        err = "Plot kind {} not recognized".format(kind)
        raise ValueError(err)

    # Check for attempt to plot onto specific axes and warn
    if "ax" in kwargs:
        msg = (
            "relplot is a figure-level function and does not accept "
            "the `ax` parameter. You may wish to try {}".format(kind + "plot")
        )
        warnings.warn(msg, UserWarning)
        kwargs.pop("ax")

    # Use the full dataset to map the semantics
    p = plotter(
        data=data,
        variables=plotter.get_semantics(locals()),
        legend=legend,
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, dashes=dashes, order=style_order)

    # Extract the semantic mappings so they can be passed, fully resolved,
    # to the axes-level function on each facet (keeping mappings consistent
    # across facets).
    if "hue" in p.variables:
        palette = p._hue_map.lookup_table
        hue_order = p._hue_map.levels
        hue_norm = p._hue_map.norm
    else:
        palette = hue_order = hue_norm = None

    if "size" in p.variables:
        sizes = p._size_map.lookup_table
        size_order = p._size_map.levels
        size_norm = p._size_map.norm

    if "style" in p.variables:
        style_order = p._style_map.levels
        if markers:
            markers = {k: p._style_map(k, "marker") for k in style_order}
        else:
            markers = None
        if dashes:
            dashes = {k: p._style_map(k, "dashes") for k in style_order}
        else:
            dashes = None
    else:
        markers = dashes = style_order = None

    # Now extract the data that would be used to draw a single plot
    variables = p.variables
    plot_data = p.plot_data
    plot_semantics = p.semantics

    # Define the common plotting parameters
    plot_kws = dict(
        palette=palette, hue_order=hue_order, hue_norm=hue_norm,
        sizes=sizes, size_order=size_order, size_norm=size_norm,
        markers=markers, dashes=dashes, style_order=style_order,
        legend=False,
    )
    plot_kws.update(kwargs)
    if kind == "scatter":
        # scatterplot does not accept a dashes parameter.
        plot_kws.pop("dashes")

    # Add the grid semantics onto the plotter
    grid_semantics = "row", "col"
    p.semantics = plot_semantics + grid_semantics
    p.assign_variables(
        data=data,
        variables=dict(
            x=x, y=y,
            hue=hue, size=size, style=style, units=units,
            row=row, col=col,
        ),
    )

    # Define the named variables for plotting on each facet
    # Rename the variables with a leading underscore to avoid
    # collisions with faceting variable names
    plot_variables = {v: f"_{v}" for v in variables}
    plot_kws.update(plot_variables)

    # Pass the row/col variables to FacetGrid with their original
    # names so that the axes titles render correctly
    grid_kws = {v: p.variables.get(v, None) for v in grid_semantics}

    # Rename the columns of the plot_data structure appropriately
    new_cols = plot_variables.copy()
    new_cols.update(grid_kws)
    full_data = p.plot_data.rename(columns=new_cols)

    # Set up the FacetGrid object
    facet_kws = {} if facet_kws is None else facet_kws.copy()
    g = FacetGrid(
        data=full_data.dropna(axis=1, how="all"),
        **grid_kws,
        col_wrap=col_wrap, row_order=row_order, col_order=col_order,
        height=height, aspect=aspect, dropna=False,
        **facet_kws
    )

    # Draw the plot
    g.map_dataframe(func, **plot_kws)

    # Label the axes
    g.set_axis_labels(
        variables.get("x", None), variables.get("y", None)
    )

    # Show the legend
    if legend:
        # Replace the original plot data so the legend uses
        # numeric data with the correct type
        p.plot_data = plot_data
        p.add_legend_data(g.axes.flat[0])
        if p.legend_data:
            g.add_legend(legend_data=p.legend_data,
                         label_order=p.legend_order,
                         title=p.legend_title,
                         adjust_subtitles=True)

    # Rename the columns of the FacetGrid's `data` attribute
    # to match the original column names
    orig_cols = {
        f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
    }
    g.data = g.data.rename(columns=orig_cols)

    return g
relplot.__doc__ = """\
Figure-level interface for drawing relational plots onto a FacetGrid.
This function provides access to several different axes-level functions
that show the relationship between two variables with semantic mappings
of subsets. The ``kind`` parameter selects the underlying axes-level
function to use:
- :func:`scatterplot` (with ``kind="scatter"``; the default)
- :func:`lineplot` (with ``kind="line"``)
Extra keyword arguments are passed to the underlying function, so you
should refer to the documentation for each to see kind-specific options.
{narrative.main_api}
{narrative.relational_semantic}
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, and other parameters.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce elements with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in ``data``
Grouping variable that will produce elements with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in ``data``
Grouping variable that will produce elements with different styles.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.facets.rowcol}
{params.facets.col_wrap}
row_order, col_order : lists of strings
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.style_order}
{params.rel.dashes}
{params.rel.markers}
{params.rel.legend}
kind : string
Kind of plot to draw, corresponding to a seaborn relational plot.
Options are {{``scatter`` and ``line``}}.
{params.facets.height}
{params.facets.aspect}
facet_kws : dict
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
{params.rel.units}
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
{returns.facetgrid}
Examples
--------
.. include:: ../docstrings/relplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/dataset/pandas_dataset.py | 1 | 67915 | import inspect
import json
import logging
import warnings
from datetime import datetime
from functools import wraps
from operator import ge, gt, le, lt
from typing import List
import jsonschema
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
_scipy_distribution_positional_args_from_dict,
is_valid_continuous_partition_object,
validate_distribution_parameters,
)
from .dataset import Dataset
logger = logging.getLogger(__name__)
class MetaPandasDataset(Dataset):
    """MetaPandasDataset is a thin layer between Dataset and PandasDataset.

    This two-layer inheritance is required to make @classmethod decorators work.

    Practically speaking, that means that MetaPandasDataset implements \
    expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \
    and PandasDataset implements the expectation methods themselves.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def column_map_expectation(cls, func):
        """Constructs an expectation using column-map semantics.

        The MetaPandasDataset implementation replaces the "column" parameter supplied by the user with a pandas Series
        object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation
        logic while preserving the standard Dataset signature and expected behavior.

        See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \
        for full documentation of this function.
        """
        # Positional parameter names of the wrapped function, minus `self`;
        # used by the expectation decorator to map call arguments.
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self,
            column,
            mostly=None,
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]

            result_format = parse_result_format(result_format)

            # Optionally pre-filter rows before evaluating the expectation.
            if row_condition and self._supports_row_condition:
                data = self._apply_row_condition(
                    row_condition=row_condition, condition_parser=condition_parser
                )
            else:
                data = self

            series = data[column]

            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                # Counting the number of unexpected values can be expensive when there is a large
                # number of np.nan values.
                # This only happens on expect_column_values_to_not_be_null expectations.
                # Since there is no reason to look for most common unexpected values in this case,
                # we will instruct the result formatting method to skip this step.
                # FIXME rename to mapped_ignore_values?
                # For null-checking expectations every row participates, so the
                # "ignore" mask is all-False.
                boolean_mapped_null_values = np.full(series.shape, False)
                result_format["partial_unexpected_count"] = 0
            else:
                boolean_mapped_null_values = series.isnull().values

            element_count = int(len(series))

            # FIXME rename nonnull to non_ignored?
            nonnull_values = series[boolean_mapped_null_values == False]
            nonnull_count = int((boolean_mapped_null_values == False).sum())

            # Evaluate the expectation on the non-ignored values only.
            boolean_mapped_success_values = func(self, nonnull_values, *args, **kwargs)
            success_count = np.count_nonzero(boolean_mapped_success_values)

            unexpected_list = list(
                nonnull_values[boolean_mapped_success_values == False]
            )
            unexpected_index_list = list(
                nonnull_values[boolean_mapped_success_values == False].index
            )

            # Reformat datetime-like unexpected values for display, parsing
            # strings if necessary.
            if "output_strftime_format" in kwargs:
                output_strftime_format = kwargs["output_strftime_format"]
                parsed_unexpected_list = []
                for val in unexpected_list:
                    if val is None:
                        parsed_unexpected_list.append(val)
                    else:
                        if isinstance(val, str):
                            val = parse(val)
                        parsed_unexpected_list.append(
                            datetime.strftime(val, output_strftime_format)
                        )
                unexpected_list = parsed_unexpected_list

            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )

            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list,
                unexpected_index_list,
            )

            # FIXME Temp fix for result format
            # Null-checking expectations have no meaningful "missing" stats,
            # so strip those fields from the formatted result.
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                del return_obj["result"]["unexpected_percent_nonmissing"]
                del return_obj["result"]["missing_count"]
                del return_obj["result"]["missing_percent"]
                try:
                    del return_obj["result"]["partial_unexpected_counts"]
                    del return_obj["result"]["partial_unexpected_list"]
                except KeyError:
                    pass

            return return_obj

        # Preserve the wrapped function's identity for expectation registration.
        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__

        return inner_wrapper

    @classmethod
    def column_pair_map_expectation(cls, func):
        """
        The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
        truthiness of some condition on a per row basis across a pair of columns.
        """
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self,
            column_A,
            column_B,
            mostly=None,
            ignore_row_if="both_values_are_missing",
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]

            if row_condition:
                self = self.query(row_condition).reset_index(drop=True)

            series_A = self[column_A]
            series_B = self[column_B]

            # Build the mask of rows to ignore according to the policy.
            if ignore_row_if == "both_values_are_missing":
                boolean_mapped_null_values = series_A.isnull() & series_B.isnull()
            elif ignore_row_if == "either_value_is_missing":
                boolean_mapped_null_values = series_A.isnull() | series_B.isnull()
            elif ignore_row_if == "never":
                boolean_mapped_null_values = series_A.map(lambda x: False)
            else:
                # NOTE(review): the %s placeholder is never interpolated here;
                # the tuple is passed as a second ValueError argument.
                raise ValueError("Unknown value of ignore_row_if: %s", (ignore_row_if,))

            assert len(series_A) == len(
                series_B
            ), "Series A and B must be the same length"

            # This next bit only works if series_A and _B are the same length
            element_count = int(len(series_A))
            nonnull_count = (boolean_mapped_null_values == False).sum()

            nonnull_values_A = series_A[boolean_mapped_null_values == False]
            nonnull_values_B = series_B[boolean_mapped_null_values == False]
            nonnull_values = [
                value_pair
                for value_pair in zip(list(nonnull_values_A), list(nonnull_values_B))
            ]

            boolean_mapped_success_values = func(
                self, nonnull_values_A, nonnull_values_B, *args, **kwargs
            )
            success_count = boolean_mapped_success_values.sum()

            # Pairs that were evaluated (not ignored) and failed the check.
            unexpected_list = [
                value_pair
                for value_pair in zip(
                    list(
                        series_A[
                            (boolean_mapped_success_values == False)
                            & (boolean_mapped_null_values == False)
                        ]
                    ),
                    list(
                        series_B[
                            (boolean_mapped_success_values == False)
                            & (boolean_mapped_null_values == False)
                        ]
                    ),
                )
            ]
            unexpected_index_list = list(
                series_A[
                    (boolean_mapped_success_values == False)
                    & (boolean_mapped_null_values == False)
                ].index
            )

            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )

            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list,
                unexpected_index_list,
            )

            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper

    @classmethod
    def multicolumn_map_expectation(cls, func):
        """
        The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of
        evaluating truthiness of some condition on a per row basis across a set of columns.
        """
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self,
            column_list,
            mostly=None,
            ignore_row_if="all_values_are_missing",
            result_format=None,
            row_condition=None,
            condition_parser=None,
            *args,
            **kwargs,
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]

            if row_condition:
                self = self.query(row_condition).reset_index(drop=True)

            test_df = self[column_list]

            # Build the mask of rows to skip according to the policy.
            if ignore_row_if == "all_values_are_missing":
                boolean_mapped_skip_values = test_df.isnull().all(axis=1)
            elif ignore_row_if == "any_value_is_missing":
                boolean_mapped_skip_values = test_df.isnull().any(axis=1)
            elif ignore_row_if == "never":
                boolean_mapped_skip_values = pd.Series([False] * len(test_df))
            else:
                # NOTE(review): the %s placeholder is never interpolated here;
                # the tuple is passed as a second ValueError argument.
                raise ValueError("Unknown value of ignore_row_if: %s", (ignore_row_if,))

            boolean_mapped_success_values = func(
                self, test_df[boolean_mapped_skip_values == False], *args, **kwargs
            )
            success_count = boolean_mapped_success_values.sum()
            nonnull_count = (~boolean_mapped_skip_values).sum()
            element_count = len(test_df)

            # Rows that were evaluated (not skipped) and failed the check.
            unexpected_list = test_df[
                (boolean_mapped_skip_values == False)
                & (boolean_mapped_success_values == False)
            ]
            unexpected_index_list = list(unexpected_list.index)

            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )

            return_obj = self._format_map_output(
                result_format,
                success,
                element_count,
                nonnull_count,
                len(unexpected_list),
                unexpected_list.to_dict(orient="records"),
                unexpected_index_list,
            )

            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper
class PandasDataset(MetaPandasDataset, pd.DataFrame):
"""
PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.
For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`
Notes:
1. Samples and Subsets of PandaDataSet have ALL the expectations of the original \
data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \
property on the original data frame.
2. Concatenations, joins, and merges of PandaDataSets contain NO expectations (since no autoinspection
is performed by default).
--ge-feature-maturity-info--
id: validation_engine_pandas
title: Validation Engine - Pandas
icon:
short_description: Use Pandas DataFrame to validate data
description: Use Pandas DataFrame to validate data
how_to_guide_url:
maturity: Production
maturity_details:
api_stability: Stable
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation
documentation_completeness: Complete
bug_risk: Low
expectation_completeness: Complete
--ge-feature-maturity-info--
"""
# this is necessary to subclass pandas in a proper way.
# NOTE: specifying added properties in this way means that they will NOT be carried over when
# the dataframe is manipulated, which we might want. To specify properties that are carried over
# to manipulation results, we would just use `_metadata = ['row_count', ...]` here. The most likely
# case is that we want the former, but also want to re-initialize these values to None so we don't
# get an attribute error when trying to access them (I think this could be done in __finalize__?)
_internal_names = pd.DataFrame._internal_names + [
"_batch_kwargs",
"_batch_markers",
"_batch_parameters",
"_batch_id",
"_expectation_suite",
"_config",
"caching",
"default_expectation_args",
"discard_subset_failing_expectations",
]
_internal_names_set = set(_internal_names)
_supports_row_condition = True
# We may want to expand or alter support for subclassing dataframes in the future:
# See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas
def __init__(self, *args, **kwargs):
    """Construct the dataset, forwarding all arguments to pd.DataFrame.

    ``discard_subset_failing_expectations`` is read from kwargs (default False).
    NOTE(review): that kwarg is also forwarded to ``super().__init__`` on the
    line above the ``get`` — confirm pandas tolerates it, otherwise the flag is
    effectively only settable after construction.
    """
    super().__init__(*args, **kwargs)
    self.discard_subset_failing_expectations = kwargs.get(
        "discard_subset_failing_expectations", False
    )
@property
def _constructor(self):
return self.__class__
def __finalize__(self, other, method=None, **kwargs):
    # Propagate expectation-suite state when pandas constructs a new object
    # from `other` (e.g. via the _constructor path during copy/slice).
    if isinstance(other, PandasDataset):
        self._initialize_expectations(other._expectation_suite)
        # If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)
        # then it may not have discard_subset_failing_expectations set. Default to self value
        self.discard_subset_failing_expectations = getattr(
            other,
            "discard_subset_failing_expectations",
            self.discard_subset_failing_expectations,
        )
        if self.discard_subset_failing_expectations:
            self.discard_failing_expectations()
    super().__finalize__(other, method, **kwargs)
    return self
def _apply_row_condition(self, row_condition, condition_parser):
if condition_parser not in ["python", "pandas"]:
raise ValueError(
"condition_parser is required when setting a row_condition,"
" and must be 'python' or 'pandas'"
)
else:
return self.query(row_condition, parser=condition_parser).reset_index(
drop=True
)
def get_row_count(self):
    """Number of rows in the dataset."""
    return len(self)
def get_column_count(self):
    """Number of columns in the dataset."""
    return len(self.columns)
def get_table_columns(self) -> List[str]:
    """Column names, in order."""
    return self.columns.tolist()
def get_column_sum(self, column):
    """Sum of the named column."""
    series = self[column]
    return series.sum()
def get_column_max(self, column, parse_strings_as_datetimes=False):
    """Maximum of the named column, ignoring nulls.

    When parse_strings_as_datetimes is True, values are run through the
    dateutil parser before comparison.
    """
    values = self[column].dropna()
    if parse_strings_as_datetimes:
        values = values.map(parse)
    return values.max()
def get_column_min(self, column, parse_strings_as_datetimes=False):
    """Minimum of the named column, ignoring nulls.

    When parse_strings_as_datetimes is True, values are run through the
    dateutil parser before comparison.
    """
    values = self[column].dropna()
    if parse_strings_as_datetimes:
        values = values.map(parse)
    return values.min()
def get_column_mean(self, column):
    """Arithmetic mean of the named column (pandas skips NA by default)."""
    series = self[column]
    return series.mean()
def get_column_nonnull_count(self, column):
    """Return the number of non-null values in the named column.

    Uses Series.count(), which counts non-NA cells directly, replacing the
    previous boolean-mask-compared-to-``False`` anti-idiom
    (``series[series.isnull() == False]``); the result is identical.
    """
    return int(self[column].count())
def get_column_value_counts(self, column, sort="value", collate=None):
    """Return per-value counts for the named column as a pd.Series.

    Args:
        column: column name.
        sort: "value" (sort by value), "count" (ascending count), or "none".
        collate: not supported for pandas; must be None.

    Returns:
        pd.Series named "count" with its index named "value".

    Raises:
        ValueError: for an invalid ``sort`` or a non-None ``collate``.
    """
    if sort not in ["value", "count", "none"]:
        raise ValueError("sort must be either 'value', 'count', or 'none'")
    if collate is not None:
        raise ValueError("collate parameter is not supported in PandasDataset")
    counts = self[column].value_counts()
    if sort == "value":
        try:
            counts.sort_index(inplace=True)
        except TypeError:
            # Having values of multiple types in a object dtype column (e.g., strings and floats)
            # raises a TypeError when the sorting method performs comparisons.
            if self[column].dtype == object:
                counts.index = counts.index.astype(str)
                counts.sort_index(inplace=True)
    elif sort == "count":
        # BUG FIX: this branch previously tested sort == "counts", a value the
        # validation above never allows, so sort="count" silently returned the
        # default (descending-count) ordering instead of sorting ascending.
        counts.sort_values(inplace=True)
    counts.name = "count"
    counts.index.name = "value"
    return counts
def get_column_unique_count(self, column):
    """Number of distinct (non-null) values in the named column."""
    return len(self.get_column_value_counts(column))
def get_column_modes(self, column):
    """Modal value(s) of the named column, as a list."""
    modes = self[column].mode()
    return list(modes.values)
def get_column_median(self, column):
    """Median of the named column (pandas skips NA by default)."""
    series = self[column]
    return series.median()
def get_column_quantiles(self, column, quantiles, allow_relative_error=False):
    """Return the requested quantiles of the named column as a list.

    For pandas, ``allow_relative_error`` doubles as the ``interpolation``
    argument to Series.quantile; any falsy value selects "nearest".
    """
    interpolation_options = ("linear", "lower", "higher", "midpoint", "nearest")
    interpolation = allow_relative_error if allow_relative_error else "nearest"
    if interpolation not in interpolation_options:
        raise ValueError(
            f"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'"
            f"parameter of .quantile() (one of {interpolation_options})"
        )
    return self[column].quantile(quantiles, interpolation=interpolation).tolist()
def get_column_stdev(self, column):
    """Sample standard deviation of the named column (pandas default ddof=1)."""
    series = self[column]
    return series.std()
def get_column_hist(self, column, bins):
    """Histogram counts of the named column for the given bin edges."""
    counts, _ = np.histogram(self[column], bins, density=False)
    return list(counts)
def get_column_count_in_range(
    self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
    """Count values of the named column within the given range.

    strict_min/strict_max make the respective bound exclusive; note the
    defaults give a closed-open interval [min_val, max_val). A None bound
    leaves that side open; at least one bound is required.
    """
    # TODO this logic could probably go in the non-underscore version if we want to cache
    if min_val is None and max_val is None:
        raise ValueError("Must specify either min or max value")
    if min_val is not None and max_val is not None and min_val > max_val:
        raise ValueError("Min value must be <= to max value")
    values = self[column]
    if min_val is not None:
        values = values[values > min_val] if strict_min else values[values >= min_val]
    if max_val is not None:
        values = values[values < max_val] if strict_max else values[values <= max_val]
    return len(values)
def get_crosstab(
    self,
    column_A,
    column_B,
    bins_A=None,
    bins_B=None,
    n_bins_A=None,
    n_bins_B=None,
):
    """Get crosstab of column_A and column_B, binning values if necessary"""
    # Bin each column independently (numeric -> interval labels, categorical ->
    # top-n categories; see get_binned_values), then cross-tabulate counts.
    series_A = self.get_binned_values(self[column_A], bins_A, n_bins_A)
    series_B = self.get_binned_values(self[column_B], bins_B, n_bins_B)
    return pd.crosstab(series_A, columns=series_B)
def get_binned_values(self, series, bins, n_bins):
    """
    Get binned values of series.

    Args:
        Series (pd.Series): Input series
        bins (list):
            Bins for the series. List of numeric if series is numeric or list of list
            of series values else.
        n_bins (int): Number of bins. Ignored if bins is not None.
    """
    if n_bins is None:
        n_bins = 10
    if series.dtype in ["int", "float"]:
        if bins is not None:
            # Normalize user-supplied numeric bins and pad the edges so every
            # observed value falls inside the outermost bins.
            bins = sorted(np.unique(bins))
            if np.min(series) < bins[0]:
                bins = [np.min(series)] + bins
            if np.max(series) > bins[-1]:
                bins = bins + [np.max(series)]
        if bins is None:
            bins = np.histogram_bin_edges(series[series.notnull()], bins=n_bins)
        # Make sure max of series is included in rightmost bin
        bins[-1] = np.nextafter(bins[-1], bins[-1] + 1)
        # Create labels for returned series
        # Used in e.g. crosstab that is printed as observed value in data docs.
        # NOTE(review): when `bins` came from the caller it is a plain Python
        # list at this point, and `bins[1:] - bins[:-1]` would raise TypeError
        # on lists — confirm whether callers only reach this with bins=None.
        precision = int(np.log10(min(bins[1:] - bins[:-1]))) + 2
        labels = [
            f"[{round(lower, precision)}, {round(upper, precision)})"
            for lower, upper in zip(bins[:-1], bins[1:])
        ]
        if any(np.isnan(series)):
            # Missings get digitized into bin = n_bins+1
            labels += ["(missing)"]
        return pd.Categorical.from_codes(
            codes=np.digitize(series, bins=bins) - 1,
            categories=labels,
            ordered=True,
        )
    else:
        # Non-numeric: keep the n_bins most frequent values (or the caller's
        # groupings), collapse the rest to "(other)" and nulls to "(missing)".
        if bins is None:
            value_counts = series.value_counts(sort=True)
            if len(value_counts) < n_bins + 1:
                return series.fillna("(missing)")
            else:
                other_values = sorted(value_counts.index[n_bins:])
                replace = {value: "(other)" for value in other_values}
        else:
            # Caller-provided groups: each group (list of values) is mapped to
            # a single label joining its members.
            replace = dict()
            for x in bins:
                replace.update({value: ", ".join(x) for value in x})
        return (
            series.replace(to_replace=replace)
            .fillna("(missing)")
            .astype("category")
        )
### Expectation methods ###
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_unique(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it occurs exactly once in the column."""
    duplicated_mask = column.duplicated(keep=False)
    return ~duplicated_mask
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_null(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it is not null."""
    null_mask = column.isnull()
    return ~null_mask
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_null(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it is null."""
    return column.isna()
@DocInherit
def expect_column_values_to_be_of_type(
    self,
    column,
    type_,
    **kwargs
    # Since we've now received the default arguments *before* the expectation decorator, we need to
    # ensure we only pass what we actually received. Hence, we'll use kwargs
    # mostly=None,
    # result_format=None,
    # row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
):
    """
    The pandas implementation of this expectation takes kwargs mostly, result_format, include_config,
    catch_exceptions, and meta as other expectations, however it declares **kwargs because it needs to
    be able to fork into either aggregate or map semantics depending on the column type (see below).

    In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
    different storage types in the same column.

    To respect that implementation, the expect_column_values_to_be_of_type expectations will first attempt to
    use the column dtype information to determine whether the column is restricted to the provided type. If that
    is possible, then expect_column_values_to_be_of_type will return aggregate information including an
    observed_value, similarly to other backends.

    If it is not possible (because the column dtype is "object" but a more specific type was specified), then
    PandasDataset will use column map semantics: it will return map expectation results and
    check each value individually, which can be substantially slower.

    Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
    numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
    """
    # Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
    if (
        self[column].dtype != "object"
        or type_ is None
        or type_ in ["object", "object_", "O"]
    ):
        # Aggregate branch: the dtype alone decides success.
        res = self._expect_column_values_to_be_of_type__aggregate(
            column, type_, **kwargs
        )
        # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
        # existing expectation, but it should be definitely kept in sync
        # We do not need this bookkeeping if we are in an active validation:
        if self._active_validation:
            return res
        # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
        # of the proper expectation_type
        existing_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_of_type",
                kwargs={"column": column},
            )
        )
        if len(existing_expectations) == 1:
            self._expectation_suite.expectations.pop(existing_expectations[0])
        # Now, rename the expectation we just added
        new_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="_expect_column_values_to_be_of_type__aggregate",
                kwargs={"column": column},
            )
        )
        assert len(new_expectations) == 1
        old_config = self._expectation_suite.expectations[new_expectations[0]]
        new_config = ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_of_type",
            kwargs=old_config.kwargs,
            meta=old_config.meta,
            success_on_last_run=old_config.success_on_last_run,
        )
        self._expectation_suite.expectations[new_expectations[0]] = new_config
    else:
        # Map branch: check each value individually (slower).
        # NOTE(review): the suite-bookkeeping below mirrors the aggregate
        # branch exactly, differing only in the internal expectation_type name.
        res = self._expect_column_values_to_be_of_type__map(column, type_, **kwargs)
        # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
        # existing expectation, but it should be definitely kept in sync
        # We do not need this bookkeeping if we are in an active validation:
        if self._active_validation:
            return res
        # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
        # of the proper expectation_type
        existing_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_of_type",
                kwargs={"column": column},
            )
        )
        if len(existing_expectations) == 1:
            self._expectation_suite.expectations.pop(existing_expectations[0])
        # Now, rename the expectation we just added
        new_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="_expect_column_values_to_be_of_type__map",
                kwargs={"column": column},
            )
        )
        assert len(new_expectations) == 1
        old_config = self._expectation_suite.expectations[new_expectations[0]]
        new_config = ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_of_type",
            kwargs=old_config.kwargs,
            meta=old_config.meta,
            success_on_last_run=old_config.success_on_last_run,
        )
        self._expectation_suite.expectations[new_expectations[0]] = new_config
    return res
@DataAsset.expectation(["column", "type_", "mostly"])
def _expect_column_values_to_be_of_type__aggregate(
    self,
    column,
    type_,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # Aggregate semantics: compare the column *dtype* against the requested
    # type name rather than checking individual values.
    if mostly is not None:
        raise ValueError(
            "PandasDataset cannot support mostly for a column with a non-object dtype."
        )
    if type_ is None:
        # No specific type requested: vacuously true.
        success = True
    else:
        # Resolve `type_` to candidate type objects: numpy dtype first, then
        # pandas / pandas extension types, then native python types.
        comp_types = []
        try:
            comp_types.append(np.dtype(type_).type)
        except TypeError:
            try:
                pd_type = getattr(pd, type_)
                if isinstance(pd_type, type):
                    comp_types.append(pd_type)
            except AttributeError:
                pass
            try:
                pd_type = getattr(pd.core.dtypes.dtypes, type_)
                if isinstance(pd_type, type):
                    comp_types.append(pd_type)
            except AttributeError:
                pass
        native_type = self._native_type_type_map(type_)
        if native_type is not None:
            comp_types.extend(native_type)
        success = self[column].dtype.type in comp_types
    return {
        "success": success,
        "result": {"observed_value": self[column].dtype.type.__name__},
    }
@staticmethod
def _native_type_type_map(type_):
# We allow native python types in cases where the underlying type is "object":
if type_.lower() == "none":
return (type(None),)
elif type_.lower() == "bool":
return (bool,)
elif type_.lower() in ["int", "long"]:
return (int,)
elif type_.lower() == "float":
return (float,)
elif type_.lower() == "bytes":
return (bytes,)
elif type_.lower() == "complex":
return (complex,)
elif type_.lower() in ["str", "string_types"]:
return (str,)
elif type_.lower() == "list":
return (list,)
elif type_.lower() == "dict":
return (dict,)
elif type_.lower() == "unicode":
return None
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_of_type__map(
    self,
    column,
    type_,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it is an instance of the requested type.

    `type_` is resolved against numpy dtypes, pandas (extension) types, and
    native python types, in that order.
    """
    candidate_types = []
    try:
        candidate_types.append(np.dtype(type_).type)
    except TypeError:
        for namespace in (pd, pd.core.dtypes.dtypes):
            try:
                pandas_type = getattr(namespace, type_)
            except AttributeError:
                continue
            if isinstance(pandas_type, type):
                candidate_types.append(pandas_type)
    native_types = self._native_type_type_map(type_)
    if native_types is not None:
        candidate_types.extend(native_types)
    if len(candidate_types) < 1:
        raise ValueError("Unrecognized numpy/python type: %s" % type_)
    type_tuple = tuple(candidate_types)
    return column.map(lambda value: isinstance(value, type_tuple))
@DocInherit
def expect_column_values_to_be_in_type_list(
    self,
    column,
    type_list,
    **kwargs
    # Since we've now received the default arguments *before* the expectation decorator, we need to
    # ensure we only pass what we actually received. Hence, we'll use kwargs
    # mostly=None,
    # result_format = None,
    # row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
):
    """
    The pandas implementation of this expectation takes kwargs mostly, result_format, include_config,
    catch_exceptions, and meta as other expectations, however it declares **kwargs because it needs to
    be able to fork into either aggregate or map semantics depending on the column type (see below).

    In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
    different storage types in the same column.

    To respect that implementation, the expect_column_values_to_be_of_type expectations will first attempt to
    use the column dtype information to determine whether the column is restricted to the provided type. If that
    is possible, then expect_column_values_to_be_of_type will return aggregate information including an
    observed_value, similarly to other backends.

    If it is not possible (because the column dtype is "object" but a more specific type was specified), then
    PandasDataset will use column map semantics: it will return map expectation results and
    check each value individually, which can be substantially slower.

    Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
    numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
    """
    # Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
    if self[column].dtype != "object" or type_list is None:
        # Aggregate branch: the dtype alone decides success.
        res = self._expect_column_values_to_be_in_type_list__aggregate(
            column, type_list, **kwargs
        )
        # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
        # existing expectation, but it should be definitely kept in sync
        # We do not need this bookkeeping if we are in an active validation:
        if self._active_validation:
            return res
        # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
        # of the proper expectation_type
        existing_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_in_type_list",
                kwargs={"column": column},
            )
        )
        if len(existing_expectations) == 1:
            self._expectation_suite.expectations.pop(existing_expectations[0])
        # Rename the expectation we just added to the public expectation_type.
        new_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="_expect_column_values_to_be_in_type_list__aggregate",
                kwargs={"column": column},
            )
        )
        assert len(new_expectations) == 1
        old_config = self._expectation_suite.expectations[new_expectations[0]]
        new_config = ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_type_list",
            kwargs=old_config.kwargs,
            meta=old_config.meta,
            success_on_last_run=old_config.success_on_last_run,
        )
        self._expectation_suite.expectations[new_expectations[0]] = new_config
    else:
        # Map branch: check each value individually (slower).
        # NOTE(review): this bookkeeping mirrors the aggregate branch exactly,
        # differing only in the internal expectation_type name.
        res = self._expect_column_values_to_be_in_type_list__map(
            column, type_list, **kwargs
        )
        # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
        # existing expectation, but it should be definitely kept in sync
        # We do not need this bookkeeping if we are in an active validation:
        if self._active_validation:
            return res
        # First, if there is an existing expectation of this type, delete it. Then change the one we created to be
        # of the proper expectation_type
        existing_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_in_type_list",
                kwargs={"column": column},
            )
        )
        if len(existing_expectations) == 1:
            self._expectation_suite.expectations.pop(existing_expectations[0])
        # Now, rename the expectation we just added
        new_expectations = self._expectation_suite.find_expectation_indexes(
            ExpectationConfiguration(
                expectation_type="_expect_column_values_to_be_in_type_list__map",
                kwargs={"column": column},
            )
        )
        assert len(new_expectations) == 1
        old_config = self._expectation_suite.expectations[new_expectations[0]]
        new_config = ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_type_list",
            kwargs=old_config.kwargs,
            meta=old_config.meta,
            success_on_last_run=old_config.success_on_last_run,
        )
        self._expectation_suite.expectations[new_expectations[0]] = new_config
    return res
@MetaPandasDataset.expectation(["column", "type_list", "mostly"])
def _expect_column_values_to_be_in_type_list__aggregate(
    self,
    column,
    type_list,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # Aggregate semantics: compare the column *dtype* against every requested
    # type name rather than checking individual values.
    if mostly is not None:
        raise ValueError(
            "PandasDataset cannot support mostly for a column with a non-object dtype."
        )
    if type_list is None:
        # No types requested: vacuously true.
        success = True
    else:
        # Resolve each name to candidate type objects: numpy dtype first, then
        # pandas / pandas extension types, then native python types.
        comp_types = []
        for type_ in type_list:
            try:
                comp_types.append(np.dtype(type_).type)
            except TypeError:
                try:
                    pd_type = getattr(pd, type_)
                    if isinstance(pd_type, type):
                        comp_types.append(pd_type)
                except AttributeError:
                    pass
                try:
                    pd_type = getattr(pd.core.dtypes.dtypes, type_)
                    if isinstance(pd_type, type):
                        comp_types.append(pd_type)
                except AttributeError:
                    pass
            native_type = self._native_type_type_map(type_)
            if native_type is not None:
                comp_types.extend(native_type)
        success = self[column].dtype.type in comp_types
    return {
        "success": success,
        "result": {"observed_value": self[column].dtype.type.__name__},
    }
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_in_type_list__map(
    self,
    column,
    type_list,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it is an instance of any requested type.

    Each name in `type_list` is resolved against numpy dtypes, pandas
    (extension) types, and native python types, in that order.
    """
    candidate_types = []
    for type_name in type_list:
        try:
            candidate_types.append(np.dtype(type_name).type)
        except TypeError:
            for namespace in (pd, pd.core.dtypes.dtypes):
                try:
                    pandas_type = getattr(namespace, type_name)
                except AttributeError:
                    continue
                if isinstance(pandas_type, type):
                    candidate_types.append(pandas_type)
        native_types = self._native_type_type_map(type_name)
        if native_types is not None:
            candidate_types.extend(native_types)
    if len(candidate_types) < 1:
        raise ValueError("No recognized numpy/python type in list: %s" % type_list)
    type_tuple = tuple(candidate_types)
    return column.map(lambda value: isinstance(value, type_tuple))
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it belongs to value_set (None set = always True)."""
    if value_set is None:
        # Vacuously true
        return np.ones(len(column), dtype=np.bool_)
    members = (
        self._parse_value_set(value_set) if parse_strings_as_datetimes else value_set
    )
    return column.isin(members)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it does NOT belong to value_set."""
    members = (
        self._parse_value_set(value_set) if parse_strings_as_datetimes else value_set
    )
    return ~column.isin(members)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    strict_min=False,
    strict_max=False,  # tolerance=1e-9,
    parse_strings_as_datetimes=None,
    output_strftime_format=None,
    allow_cross_type_comparisons=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # Map each value to True iff it lies between min_value and max_value
    # (bound exclusive when strict_min/strict_max). A None bound is open.
    if min_value is None and max_value is None:
        raise ValueError("min_value and max_value cannot both be None")
    # if strict_min and min_value:
    # min_value += tolerance
    #
    # if strict_max and max_value:
    # max_value -= tolerance
    if parse_strings_as_datetimes:
        # tolerance = timedelta(days=tolerance)
        if min_value:
            min_value = parse(min_value)
        if max_value:
            max_value = parse(max_value)
        try:
            temp_column = column.map(parse)
        except TypeError:
            # Values are not parseable strings; compare them as-is.
            temp_column = column
    else:
        temp_column = column
    if min_value is not None and max_value is not None and min_value > max_value:
        raise ValueError("min_value cannot be greater than max_value")

    def comparator_factory(comparator, comparison_value):
        # Build a unary predicate against comparison_value; a None bound
        # yields an always-true predicate (that side of the range is open).
        def new_comparator(value):
            return comparator(value, comparison_value)

        def always_true(value):
            return True

        return always_true if comparison_value is None else new_comparator

    # gt/ge/lt/le come from the operator module (module-level import).
    min_comparator = comparator_factory(gt if strict_min else ge, min_value)
    max_comparator = comparator_factory(lt if strict_max else le, max_value)

    def cross_type_comparator(val):
        # Per-value comparison treating cross-type comparison errors as False.
        try:
            return min_comparator(val) & max_comparator(val)
        except TypeError:
            return False

    try:
        # Vectorized path: both comparators applied to the whole Series.
        return min_comparator(temp_column) & max_comparator(temp_column)
    except TypeError:
        if allow_cross_type_comparisons:
            return pd.Series(cross_type_comparator(val) for val in temp_column)
        raise TypeError(
            "Column values, min_value, and max_value must either be None or of the same type."
        )
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_increasing(
    self,
    column,
    strictly=None,
    parse_strings_as_datetimes=None,
    output_strftime_format=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # Map each value to True iff it is >= (or > when strictly) its predecessor,
    # computed via pairwise diffs.
    if parse_strings_as_datetimes:
        temp_column = column.map(parse)
        col_diff = temp_column.diff()
        # The first element is null, so it gets a bye and is always treated as True
        # NOTE(review): this assigns by label 0, unlike the null-mask assignment
        # in the non-datetime branch — confirm the index always starts at 0 here.
        col_diff[0] = pd.Timedelta(1)
        if strictly:
            return col_diff > pd.Timedelta(0)
        else:
            return col_diff >= pd.Timedelta(0)
    else:
        col_diff = column.diff()
        # The first element is null, so it gets a bye and is always treated as True
        col_diff[col_diff.isnull()] = 1
        if strictly:
            return col_diff > 0
        else:
            return col_diff >= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_decreasing(
    self,
    column,
    strictly=None,
    parse_strings_as_datetimes=None,
    output_strftime_format=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    # Map each value to True iff it is <= (or < when strictly) its predecessor,
    # computed via pairwise diffs (mirror image of ..._to_be_increasing).
    if parse_strings_as_datetimes:
        temp_column = column.map(parse)
        col_diff = temp_column.diff()
        # The first element is null, so it gets a bye and is always treated as True
        # NOTE(review): assigns by label 0, unlike the null-mask assignment in
        # the non-datetime branch — confirm the index always starts at 0 here.
        col_diff[0] = pd.Timedelta(-1)
        if strictly:
            return col_diff < pd.Timedelta(0)
        else:
            return col_diff <= pd.Timedelta(0)
    else:
        col_diff = column.diff()
        # The first element is null, so it gets a bye and is always treated as True
        col_diff[col_diff.isnull()] = -1
        if strictly:
            return col_diff < 0
        else:
            return col_diff <= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    mostly=None,
    row_condition=None,
    condition_parser=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff len(str(value)) lies within [min_value, max_value]."""
    if min_value is None and max_value is None:
        raise ValueError("min_value and max_value cannot both be None")
    # Both bounds, when given, must be integer-valued.
    try:
        for bound in (min_value, max_value):
            if bound is not None and not float(bound).is_integer():
                raise ValueError("min_value and max_value must be integers")
    except ValueError:
        raise ValueError("min_value and max_value must be integers")
    lengths = column.astype(str).str.len()
    if min_value is not None and max_value is not None:
        return lengths.between(min_value, max_value)
    if max_value is not None:
        return lengths <= max_value
    if min_value is not None:
        return lengths >= min_value
    return False
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_equal(
    self,
    column,
    value,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff its string length equals `value`."""
    lengths = column.str.len()
    return lengths == value
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff str(value) contains a match for `regex`."""
    as_strings = column.astype(str)
    return as_strings.str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff str(value) contains NO match for `regex`."""
    as_strings = column.astype(str)
    return ~as_strings.str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex_list(
    self,
    column,
    regex_list,
    match_on="any",
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it matches any (or all) of the given regexes."""
    matches_per_regex = [
        column.astype(str).str.contains(regex) for regex in regex_list
    ]
    match_frame = pd.concat(matches_per_regex, axis=1, ignore_index=True)
    if match_on == "any":
        return match_frame.any(axis="columns")
    if match_on == "all":
        return match_frame.all(axis="columns")
    raise ValueError("match_on must be either 'any' or 'all'")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(
    self,
    column,
    regex_list,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it matches NONE of the given regexes."""
    matches_per_regex = [
        column.astype(str).str.contains(regex) for regex in regex_list
    ]
    match_frame = pd.concat(matches_per_regex, axis=1, ignore_index=True)
    return ~match_frame.any(axis="columns")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_strftime_format(
    self,
    column,
    strftime_format,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each string value to True iff it parses under `strftime_format`."""
    # Below is a simple validation that the provided format can both format and parse a datetime object.
    # %D is an example of a format that can format but not parse, e.g.
    try:
        round_trip_sample = datetime.strftime(datetime.now(), strftime_format)
        datetime.strptime(round_trip_sample, strftime_format)
    except ValueError as e:
        raise ValueError("Unable to use provided strftime_format. " + str(e))

    def is_parseable_by_format(val):
        try:
            datetime.strptime(val, strftime_format)
        except TypeError:
            raise TypeError(
                "Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
            )
        except ValueError:
            return False
        return True

    return column.map(is_parseable_by_format)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_dateutil_parseable(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each string value to True iff dateutil can parse it as a date."""

    def is_parseable(candidate):
        # Non-strings are a caller error, not a failed expectation; the raised
        # TypeError propagates (it is not caught below), exactly as before.
        if type(candidate) != str:
            raise TypeError(
                "Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
            )
        try:
            parse(candidate)
        except (ValueError, OverflowError):
            return False
        return True

    return column.map(is_parseable)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_json_parseable(
    self,
    column,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it parses as JSON."""

    def is_json(val):
        try:
            json.loads(val)
            return True
        except (ValueError, TypeError):
            # json.loads raises JSONDecodeError (a ValueError subclass) for
            # malformed JSON and TypeError for non-str/bytes input. The
            # previous bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, which should propagate.
            return False

    return column.map(is_json)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_json_schema(
    self,
    column,
    json_schema,
    mostly=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Map each value to True iff it parses as JSON and validates against json_schema."""

    def matches_json_schema(val):
        try:
            val_json = json.loads(val)
            jsonschema.validate(val_json, json_schema)
            # jsonschema.validate raises an error if validation fails.
            # So if we make it this far, we know that the validation succeeded.
            return True
        except jsonschema.ValidationError:
            return False
        # jsonschema.SchemaError (invalid schema) and any other exception
        # propagate to the caller unchanged; the previous explicit
        # `except ...: raise` handlers were no-ops and have been removed.

    return column.map(matches_json_schema)
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
    self,
    column,
    distribution,
    p_value=0.05,
    params=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Succeed iff a Kolmogorov-Smirnov test of the column against the named
    scipy distribution yields a p-value >= `p_value`.

    Args:
        distribution: scipy distribution name passed to stats.kstest.
        p_value: significance threshold, strictly between 0 and 1.
        params: distribution parameters, as a dict or positional sequence.

    Raises:
        ValueError: if p_value is out of range or params are invalid for the
            distribution (via validate_distribution_parameters).
    """
    column = self[column]
    if p_value <= 0 or p_value >= 1:
        raise ValueError("p_value must be between 0 and 1 exclusive")
    # Validate params; a ValueError propagates directly. (The previous
    # `try/except ValueError as e: raise e` wrapper was a no-op.)
    validate_distribution_parameters(distribution=distribution, params=params)
    # Format arguments for scipy.stats.kstest
    if isinstance(params, dict):
        positional_parameters = _scipy_distribution_positional_args_from_dict(
            distribution, params
        )
    else:
        positional_parameters = params
    # K-S Test
    ks_result = stats.kstest(column, distribution, args=positional_parameters)
    return {
        "success": ks_result[1] >= p_value,
        "result": {
            "observed_value": ks_result[1],
            "details": {
                "expected_params": positional_parameters,
                "observed_ks_result": ks_result,
            },
        },
    }
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
    self,
    column,
    partition_object=None,
    p=0.05,
    bootstrap_samples=None,
    bootstrap_sample_size=None,
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the bootstrapped K-S test p-value against the expected
    continuous partition's CDF to exceed *p*.

    partition_object: dict with finite "bins" edges and "weights";
        must pass is_valid_continuous_partition_object.
    bootstrap_samples: number of K-S runs (default 1000).
    bootstrap_sample_size: elements drawn per run (default
        2 * number of weights).

    Raises ValueError for an invalid partition, infinite endpoints,
    or nonzero tail weights.
    """
    column = self[column]
    if not is_valid_continuous_partition_object(partition_object):
        raise ValueError("Invalid continuous partition object.")
    # TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object
    if (partition_object["bins"][0] == -np.inf) or (
        partition_object["bins"][-1] == np.inf
    ):
        raise ValueError("Partition endpoints must be finite.")
    if (
        "tail_weights" in partition_object
        and np.sum(partition_object["tail_weights"]) > 0
    ):
        raise ValueError(
            "Partition cannot have tail weights -- endpoints must be finite."
        )
    # expected CDF evaluated at each bin edge: 0, then cumulative weights
    test_cdf = np.append(np.array([0]), np.cumsum(partition_object["weights"]))

    def estimated_cdf(x):
        # piecewise-linear interpolation of the expected CDF
        return np.interp(x, partition_object["bins"], test_cdf)

    if bootstrap_samples is None:
        bootstrap_samples = 1000
    if bootstrap_sample_size is None:
        # Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've
        # compressed via a partition.
        # Sampling too few elements will make the test insensitive to significant differences, especially
        # for nonoverlapping ranges.
        bootstrap_sample_size = len(partition_object["weights"]) * 2
    # p-values from repeated K-S tests on random subsamples of the column
    results = [
        stats.kstest(
            np.random.choice(column, size=bootstrap_sample_size), estimated_cdf
        )[1]
        for _ in range(bootstrap_samples)
    ]
    # fraction of bootstrap runs whose p-value cleared the threshold;
    # add-one smoothing keeps the estimate strictly inside (0, 1)
    test_result = (1 + sum(x >= p for x in results)) / (bootstrap_samples + 1)
    # observed histogram over the expected bins, plus out-of-range counts
    hist, bin_edges = np.histogram(column, partition_object["bins"])
    below_partition = len(np.where(column < partition_object["bins"][0])[0])
    above_partition = len(np.where(column > partition_object["bins"][-1])[0])
    # Expand observed partition to report, if necessary
    if below_partition > 0 and above_partition > 0:
        observed_bins = (
            [np.min(column)] + partition_object["bins"] + [np.max(column)]
        )
        observed_weights = np.concatenate(
            ([below_partition], hist, [above_partition])
        ) / len(column)
    elif below_partition > 0:
        observed_bins = [np.min(column)] + partition_object["bins"]
        observed_weights = np.concatenate(([below_partition], hist)) / len(column)
    elif above_partition > 0:
        observed_bins = partition_object["bins"] + [np.max(column)]
        observed_weights = np.concatenate((hist, [above_partition])) / len(column)
    else:
        observed_bins = partition_object["bins"]
        observed_weights = hist / len(column)
    observed_cdf_values = np.cumsum(observed_weights)
    return_obj = {
        "success": test_result > p,
        "result": {
            "observed_value": test_result,
            "details": {
                "bootstrap_samples": bootstrap_samples,
                "bootstrap_sample_size": bootstrap_sample_size,
                "observed_partition": {
                    "bins": observed_bins,
                    "weights": observed_weights.tolist(),
                },
                "expected_partition": {
                    "bins": partition_object["bins"],
                    "weights": partition_object["weights"],
                },
                "observed_cdf": {
                    "x": observed_bins,
                    "cdf_values": [0] + observed_cdf_values.tolist(),
                },
                "expected_cdf": {
                    "x": partition_object["bins"],
                    "cdf_values": test_cdf.tolist(),
                },
            },
        },
    }
    return return_obj
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_equal(
    self,
    column_A,
    column_B,
    ignore_row_if="both_values_are_missing",
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each value in column_A to equal the paired value in column_B.

    Returns the elementwise boolean comparison; missing-value handling
    and result formatting are done by the decorator.
    """
    equal_mask = column_A == column_B
    return equal_mask
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_A_to_be_greater_than_B(
    self,
    column_A,
    column_B,
    or_equal=None,
    parse_strings_as_datetimes=None,
    allow_cross_type_comparisons=None,
    ignore_row_if="both_values_are_missing",
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each value in column_A to be greater than (or, with
    or_equal, at least equal to) the paired value in column_B.

    parse_strings_as_datetimes: map both columns through dateutil's
        `parse` before comparing.
    allow_cross_type_comparisons: not implemented; raises when truthy.
    """
    # FIXME: cross-type comparison support has not been implemented.
    # (These flags are documented as booleans, so the truthiness tests
    # replace the non-idiomatic `== True` comparisons.)
    if allow_cross_type_comparisons:
        raise NotImplementedError
    if parse_strings_as_datetimes:
        # `parse` is dateutil.parser.parse, imported at module level
        temp_column_A = column_A.map(parse)
        temp_column_B = column_B.map(parse)
    else:
        temp_column_A = column_A
        temp_column_B = column_B
    if or_equal:
        return temp_column_A >= temp_column_B
    return temp_column_A > temp_column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_in_set(
    self,
    column_A,
    column_B,
    value_pairs_set,
    ignore_row_if="both_values_are_missing",
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each (column_A, column_B) pair to appear in *value_pairs_set*.

    A None value_pairs_set is vacuously true. Missing values are
    normalised to None before membership testing, so (None, None) style
    pairs can be matched explicitly.
    """
    if value_pairs_set is None:
        # vacuously true
        return np.ones(len(column_A), dtype=np.bool_)
    temp_df = pd.DataFrame({"A": column_A, "B": column_B})
    value_pairs_set = {(x, y) for x, y in value_pairs_set}
    # Iterate the two columns directly with zip -- equivalent to the old
    # DataFrame.iterrows() loop but much faster, since iterrows builds a
    # Series object per row.
    results = [
        (None if pd.isnull(a) else a, None if pd.isnull(b) else b)
        in value_pairs_set
        for a, b in zip(temp_df["A"], temp_df["B"])
    ]
    return pd.Series(results, temp_df.index)
def expect_multicolumn_values_to_be_unique(
    self,
    column_list,
    mostly=None,
    ignore_row_if="all_values_are_missing",
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Deprecated alias: emits a DeprecationWarning, then delegates to
    expect_select_column_values_to_be_unique_within_record with the
    same arguments.
    """
    warnings.warn(
        "expect_multicolumn_values_to_be_unique is being deprecated. Please use "
        "expect_select_column_values_to_be_unique_within_record instead.",
        DeprecationWarning,
    )
    return self.expect_select_column_values_to_be_unique_within_record(
        column_list=column_list,
        mostly=mostly,
        ignore_row_if=ignore_row_if,
        result_format=result_format,
        include_config=include_config,
        catch_exceptions=catch_exceptions,
        meta=meta,
    )
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_select_column_values_to_be_unique_within_record(
    self,
    column_list,
    mostly=None,
    ignore_row_if="all_values_are_missing",
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect every value within each row of column_list to be distinct.

    A row passes when it holds as many unique values as there are
    columns. NaNs were already dealt with in the decorator, hence
    dropna=False here.
    """
    required_distinct = len(column_list.columns)
    distinct_per_row = column_list.nunique(dropna=False, axis=1)
    return distinct_per_row >= required_distinct
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_multicolumn_sum_to_equal(
    self,
    column_list,
    sum_total,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Multi-column map expectation.

    Expects the row-wise sum across the given set of columns to equal a
    specific value.

    Args:
        column_list (List[str]): set of columns to be checked
        sum_total (int): expected sum of columns
    """
    row_totals = column_list.sum(axis=1)
    return row_totals == sum_total
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_compound_columns_to_be_unique(
    self,
    column_list,
    mostly=None,
    ignore_row_if="all_values_are_missing",
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each row's combination of values across column_list to be
    unique within the dataset.

    NaN handling already happened in the decorator, so no dropna here.
    duplicated(keep=False) flags every member of a duplicate group; the
    inversion therefore marks exactly the unique rows True.
    """
    duplicate_rows = column_list.duplicated(keep=False)
    return ~duplicate_rows
| apache-2.0 |
interrogator/corpkit-app | gui.py | 1 | 316378 | #!/usr/bin/env python
"""
# corpkit GUI
# Daniel McDonald
# This file contains the frontend side of the corpkit gui.
# You can use py2app or pyinstaller on it to make a .app,
# or just run it as a script.
# Below is a string that is used to determine when minor
# updates are available on github for automatic download:
# <updated>DATE-REPLACE</updated>
# Tabbed notebook template created by:
# Patrick T. Cossette <cold_soul79078@yahoo.com>
"""
from __future__ import print_function
import sys
import string  # NOTE(review): possibly unused here -- verify before removing
import time
import os
import threading  # NOTE(review): possibly unused here -- verify before removing
try:
    # Python 2 names for the Tk dialog modules
    import tkMessageBox as messagebox
    import tkSimpleDialog as simpledialog
    import tkFileDialog as filedialog
except ImportError:
    # Python 3: bind the SAME aliases as the Python 2 branch. The old
    # code only did `import tkinter.messagebox` etc., which left the
    # bare names messagebox/filedialog/simpledialog undefined on Py3.
    import tkinter.messagebox as messagebox
    import tkinter.filedialog as filedialog
    import tkinter.simpledialog as simpledialog
try:
    import Tkinter as tkinter
    from Tkinter import *
    from ttk import Progressbar, Style
    from Tkinter import _setit
except ImportError:
    import tkinter
    from tkinter import *
    from tkinter.ttk import Progressbar, Style
    from tkinter import _setit
# todo: delete from the rest of code
from corpkit.corpus import Corpus
# determine path to gui resources:
py_script = False
from_py = False
rd = sys.argv[0]
if sys.platform == 'darwin':
    key = 'Mod1'
    fext = 'app'
    if '.app' in rd:
        # running from inside a py2app/pyinstaller .app bundle
        rd = os.path.join(rd.split('.app', 1)[0] + '.app', 'Contents', 'MacOS')
    else:
        # plain interpreter: resources live next to the corpkit package
        import corpkit
        rd = os.path.dirname(corpkit.__file__)
        from_py = True
else:
    key = 'Control'
    fext = 'exe'
    if '.py' in rd:
        py_script = True
        rd = os.path.dirname(os.path.join(rd.split('.py', 1)[0]))
########################################################################
class SplashScreen(object):
    """
    A simple splash screen to display before corpkit is loaded.

    Used as a context manager: the image appears on __enter__ and is
    destroyed on __exit__, after at least *minSplashTime* seconds.
    """
    def __init__(self, tkRoot, imageFilename, minSplashTime=0):
        import os
        # if there is some PIL issue, just don't show GUI
        # todo: this would also need to disable display of previous figures
        self._can_operate = True
        try:
            from PIL import Image
            from PIL import ImageTk
        except ImportError:
            # no PIL available: __enter__/__exit__ become no-ops
            self._can_operate = False
            return
        self._root = tkRoot
        # rd is the module-level resource directory computed at import time
        fname = os.path.join(rd, imageFilename)
        if os.path.isfile(fname):
            self._image = ImageTk.PhotoImage(file=fname)
            self._splash = None
            # earliest time at which __exit__ may dismiss the splash
            self._minSplashTime = time.time() + minSplashTime
        else:
            # missing image file: both guards below short-circuit
            self._image = False

    def __enter__(self):
        # Remove the app window from the display
        #self._root.withdraw( )
        if not self._can_operate:
            return
        if not self._image:
            return
        # Calculate the geometry to center the splash image
        scrnWt = self._root.winfo_screenwidth()
        scrnHt = self._root.winfo_screenheight()
        imgWt = self._image.width()
        imgHt = self._image.height()
        imgXPos = (scrnWt / 2) - (imgWt / 2)
        imgYPos = (scrnHt / 2) - (imgHt / 2)
        # Create the splash screen: a borderless toplevel holding the image
        self._splash = Toplevel()
        self._splash.overrideredirect(1)
        self._splash.geometry('+%d+%d' % (imgXPos, imgYPos))
        background_label = Label(self._splash, image=self._image)
        background_label.grid(row=1, column=1, sticky=W)
        # this code shows the version number, but it's ugly.
        #import corpkit
        #oldstver = str(corpkit.__version__)
        #txt = 'Loading corpkit v%s ...' % oldstver
        #cnv = Canvas(self._splash, width=200, height=20)
        #cnv.create_text((100, 14), text=txt, font=("Helvetica", 14, "bold"))
        #cnv.grid(row=1, column=1, sticky='SW', padx=20, pady=20)
        self._splash.lift()
        self._splash.update( )

    def __exit__(self, exc_type, exc_value, traceback ):
        # Make sure the minimum splash time has elapsed
        if not self._can_operate:
            return
        if not self._image:
            return
        timeNow = time.time()
        if timeNow < self._minSplashTime:
            time.sleep( self._minSplashTime - timeNow )
        # Destroy the splash window
        self._splash.destroy( )
        # Display the application window
        #self._root.deiconify( )
class RedirectText(object):
    """Send text to app from stdout, for the log and the status bar"""

    def __init__(self, text_ctrl, log_text, text_widget):
        """Constructor

        text_ctrl: StringVar backing the one-line status readout
        log_text: list accumulating log lines for the session log
        text_widget: Tk Text widget used as the visible status bar
        """
        def dumfun():
            """to satisfy ipython, sys, which look for a flush method"""
            pass
        self.output = text_ctrl
        self.log = log_text
        # no-op flush/fileno so this object can stand in for sys.stdout
        self.flush = dumfun
        self.fileno = dumfun
        self.text_widget = text_widget

    def write(self, string):
        """Add stdout and stderr to log and/or to console"""
        import re
        # don't show blank lines
        show_reg = re.compile(r'^\s*$')
        # delete local abs paths from traceback
        del_reg = re.compile(r'^/*(Users|usr).*/(site-packages|corpkit/corpkit/)')
        # keep per-file parser progress chatter out of the log
        if 'Parsing file' not in string and 'Initialising parser' not in string \
            and not 'Interrogating subcorpus' in string:
            if not re.match(show_reg, string):
                string = re.sub(del_reg, '', string)
                self.log.append(string.rstrip('\n'))
                # mirror the message into the status-bar Text widget
                self.text_widget.config(state='normal')
                self.text_widget.delete(1.0, 'end')
                self.text_widget.insert('end', string.rstrip('\n'))
                self.text_widget.config(state='disabled')
        if not re.match(show_reg, string):
            # skip echoed code lines; display only the last output line
            if not string.lstrip().startswith('#') and not string.lstrip().startswith('import'):
                string = re.sub(del_reg, '', string).rstrip('\n').rstrip()
                string = string.split('\n')[-1]
                self.output.set(string.lstrip().rstrip('\n').rstrip())
                self.text_widget.config(state='normal')
                self.text_widget.delete(1.0, 'end')
                self.text_widget.insert('end', string.lstrip().rstrip('\n').rstrip())
                self.text_widget.config(state='disabled')
class Label2(Frame):
    """A Frame-wrapped Text widget whose size is given in pixels.

    Plain Tk labels size themselves in characters; this widget disables
    geometry propagation so the requested pixel width/height stick.
    """

    def __init__(self, master, width=0, height=0, **kwargs):
        # remember the requested pixel geometry
        self.width = width
        self.height = height
        Frame.__init__(self, master, width=width, height=height)
        inner = Text(self, width=1, **kwargs)
        inner.pack(fill='both', expand=True)
        # exposed so callers can write into the text area directly
        self.label_widget = inner

    def pack(self, *args, **kwargs):
        Frame.pack(self, *args, **kwargs)
        # keep the child Text from resizing this frame
        self.pack_propagate(False)

    def grid(self, *args, **kwargs):
        Frame.grid(self, *args, **kwargs)
        self.grid_propagate(False)
class HyperlinkManager:
    """Make clickable hyperlinks inside a Tk Text widget (used by About)."""

    def __init__(self, text):
        self.text = text
        self.text.tag_config("hyper", foreground="blue", underline=1)
        self.text.tag_bind("hyper", "<Enter>", self._enter)
        self.text.tag_bind("hyper", "<Leave>", self._leave)
        self.text.tag_bind("hyper", "<Button-1>", self._click)
        self.reset()

    def reset(self):
        # forget every registered link callback
        self.links = {}

    def add(self, action):
        """Register *action* and return the tag pair for Text.insert."""
        tag = "hyper-%d" % len(self.links)
        self.links[tag] = action
        return "hyper", tag

    def _enter(self, event):
        # pointing-hand cursor while hovering a link
        self.text.config(cursor="hand2")

    def _leave(self, event):
        self.text.config(cursor="")

    def _click(self, event):
        # fire the callback of the first link tag under the cursor
        for tag in self.text.tag_names(CURRENT):
            if tag.startswith("hyper-"):
                self.links[tag]()
                return
class Notebook(Frame):
    """Tabbed-notebook widget with a built-in status bar and progress bar.

    Also redirects stdout/stderr (unless debug=True) into the status bar
    and a session log via RedirectText.
    """
    def __init__(self, parent, activerelief=RAISED, inactiverelief=FLAT,
                 xpad=4, ypad=6, activefg='black', inactivefg='black', debug=False,
                 activefc=("Helvetica", 14, "bold"), inactivefc=("Helvetica", 14), **kw):
        """Construct a Notebook Widget

        Valid resource names: background, bd, bg, borderwidth, class,
        colormap, container, cursor, height, highlightbackground,
        highlightcolor, highlightthickness, relief, takefocus, visual,
        width, activerelief, inactiverelief, xpad, ypad.

        xpad and ypad are used as ipadx/ipady on the Label widgets that
        make up the tabs. activefg/inactivefg (and activefc/inactivefc)
        set the colour (and font) of tab text when selected vs not.
        """
        self.activefg = activefg
        self.inactivefg = inactivefg
        self.activefc = activefc
        self.inactivefc = inactivefc
        self.deletedTabs = []
        self.xpad = xpad
        self.ypad = ypad
        self.activerelief = activerelief
        self.inactiverelief = inactiverelief
        self.tabVars = {}
        self.tabs = 0
        # progress-bar value (0-100), shared with long-running tasks
        self.progvar = DoubleVar()
        self.progvar.set(0)
        self.style = Style()
        self.style.theme_use("default")
        self.style.configure("TProgressbar", thickness=15, foreground='#347DBE', background='#347DBE')
        self.kwargs = kw
        # NOTE(review): tabVars/tabs are re-initialised here, duplicating
        # the assignments a few lines above
        self.tabVars = {}
        self.tabs = 0
        # the notebook, with its tabs, middle, status bars
        self.noteBookFrame = Frame(parent, bg='#c5c5c5')
        self.BFrame = Frame(self.noteBookFrame, bg='#c5c5c5')
        self.statusbar = Frame(self.noteBookFrame, bd=2, height=24, width=kw.get('width'), bg='#F4F4F4')
        self.noteBook = Frame(self.noteBookFrame, relief=RAISED, bd=2, **kw)
        self.noteBook.grid_propagate(0)
        # status bar text and log
        self.status_text=StringVar()
        self.log_stream = []
        #self.progspace = Frame(self.statusbar, width=int(kw.get('width') * 0.4))
        #self.progspace.grid(sticky=E)
        #self.statusbar.grid_columnconfigure(2, weight=5)
        # status text takes ~65% of the width, progress bar ~35%
        self.text = Label2(self.statusbar, #textvariable=self.status_text,
                           width=int(kw.get('width') * 0.65), height=24, font=("Courier New", 13))
        self.progbar = Progressbar(self.statusbar, orient='horizontal',
                                   length=int(kw.get('width') * 0.35),
                                   mode='determinate', variable=self.progvar,
                                   style="TProgressbar")
        #self.statusbar.grid_columnconfigure(1, weight=2)
        self.statusbar.grid(row=2, column=0)
        #self.progbar.pack(anchor=E, fill='x')
        self.text.pack(side=LEFT)
        self.progbar.pack(side=RIGHT, expand=True)
        #self.statusbar.grid_propagate()
        # redirect stdout for log
        self.redir = RedirectText(self.status_text, self.log_stream, self.text.label_widget)
        if not debug:
            sys.stdout = self.redir
            sys.stderr = self.redir
        Frame.__init__(self)
        self.noteBookFrame.grid()
        self.BFrame.grid(row=0, column=0, columnspan=27, sticky=N)  # ", column=13)" puts the tabs in the middle!
        self.noteBook.grid(row=1, column=0, columnspan=27)
        #self.progbarspace.grid(row=2, column=0, padx=(273, 0), sticky=E)

    def change_tab(self, IDNum):
        """Internal: show tab IDNum's frame and restyle all tab labels."""
        for i in (a for a in range(0, len(list(self.tabVars.keys())))):
            if not i in self.deletedTabs:
                if i != IDNum:
                    # deselected tab: hide frame, use inactive styling
                    self.tabVars[i][1].grid_remove()
                    self.tabVars[i][0]['relief'] = self.inactiverelief
                    self.tabVars[i][0]['fg'] = self.inactivefg
                    self.tabVars[i][0]['font'] = self.inactivefc
                    self.tabVars[i][0]['bg'] = '#c5c5c5'
                else:
                    # selected tab: show frame, use active styling
                    self.tabVars[i][1].grid()
                    self.tabVars[IDNum][0]['relief'] = self.activerelief
                    self.tabVars[i][0]['fg'] = self.activefg
                    self.tabVars[i][0]['font'] = self.activefc
                    self.tabVars[i][0]['bg'] = 'white'

    def add_tab(self, width=2, **kw):
        import tkinter
        """Creates a new tab, and returns its corresponding frame
        (the `width` parameter is currently unused)
        """
        temp = self.tabs
        # tabVars[n] = [tab Label, content Frame]
        self.tabVars[self.tabs] = [Label(self.BFrame, relief = RIDGE, **kw)]
        self.tabVars[self.tabs][0].bind("<Button-1>", lambda Event:self.change_tab(temp))
        self.tabVars[self.tabs][0].pack(side = LEFT, ipady=self.ypad, ipadx=self.xpad)
        self.tabVars[self.tabs].append(Frame(self.noteBook, **self.kwargs))
        self.tabVars[self.tabs][1].grid(row=0, column=0)
        # always re-focus the first tab after adding
        self.change_tab(0)
        self.tabs += 1
        return self.tabVars[temp][1]

    def destroy_tab(self, tab):
        """Delete a tab from the notebook, as well as its corresponding frame
        """
        self.iteratedTabs = 0
        for b in list(self.tabVars.values()):
            if b[1] == tab:
                b[0].destroy()
                self.tabs -= 1
                # remember the slot so change_tab skips it
                self.deletedTabs.append(self.iteratedTabs)
                break
            self.iteratedTabs += 1

    def focus_on(self, tab):
        """Locate the IDNum of the given tab and use
        change_tab to give it focus
        """
        self.iteratedTabs = 0
        for b in list(self.tabVars.values()):
            if b[1] == tab:
                self.change_tab(self.iteratedTabs)
                break
            self.iteratedTabs += 1
def corpkit_gui(noupdate=False, loadcurrent=False, debug=False):
"""
The actual code for the application
:param noupdate: prevent auto update checking
:type noupdate: bool
:param loadcurrent: load this path as the project
:type loadcurrent: str
"""
# make app
root=Tk()
#minimise it
root.withdraw( )
# generate splash
with SplashScreen(root, 'loading_image.png', 1.0):
# set app size
#root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
import warnings
warnings.filterwarnings("ignore")
import traceback
import dateutil
import sys
import os
import corpkit
from corpkit.process import get_gui_resource_dir, get_fullpath_to_jars
from tkintertable import TableCanvas, TableModel
from nltk.draw.table import MultiListbox, Table
from collections import OrderedDict
from pandas import Series, DataFrame
# stop warning when insecure download is performed
# this somehow raised an attribute error for anrej,
# so we'll allow it to pass ...
import requests
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
import locale
if sys.platform == 'win32':
try:
locale.setlocale(locale.LC_ALL, 'english-usa')
except:
pass
else:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
# unused in the gui, dummy imports for pyinstaller
#import seaborn
from hashlib import md5
import chardet
import pyparsing
# a try statement in case not bundling scipy, which
# tends to bloat the .app
try:
from scipy.stats import linregress
except:
pass
# compress some things for a small screen ...
small_screen = root.winfo_screenheight() < 800
#small_screen = True
## add tregex and some other bits to path
paths = ['', 'dictionaries', 'corpkit', 'nltk_data']
for p in paths:
fullp = os.path.join(rd, p).rstrip('/')
if not fullp in sys.path:
sys.path.append(fullp)
# add nltk data to path
import nltk
nltk_data_path = os.path.join(rd, 'nltk_data')
if nltk_data_path not in nltk.data.path:
nltk.data.path.append(os.path.join(rd, 'nltk_data'))
# not sure if needed anymore: more path setting
corpath = os.path.dirname(corpkit.__file__)
baspat = os.path.dirname(os.path.dirname(corpkit.__file__))
dicpath = os.path.join(baspat, 'dictionaries')
os.environ["PATH"] += os.pathsep + corpath + os.pathsep + dicpath
sys.path.append(corpath)
sys.path.append(dicpath)
sys.path.append(baspat)
root.title("corpkit")
root.imagewatched = StringVar()
#root.overrideredirect(True)
root.resizable(FALSE,FALSE)
note_height = 600 if small_screen else 660
note_width = root.winfo_screenwidth()
if note_width > note_height * 1.62:
note_width = note_height * 1.62
note_width = int(note_width)
note = Notebook(root, width=note_width, height=note_height,
activefg='#000000', inactivefg='#585555', debug=debug) #Create a Note book Instance
note.grid()
tab0 = note.add_tab(text="Build")
tab1 = note.add_tab(text="Interrogate")
tab2 = note.add_tab(text="Edit")
tab3 = note.add_tab(text="Visualise")
tab4 = note.add_tab(text="Concordance")
note.text.update_idletasks()
################### ################### ################### ###################
# VARIABLES # # VARIABLES # # VARIABLES # # VARIABLES #
################### ################### ################### ###################
# in this section, some recurring, empty variables are defined
# to do: compress most of the dicts into one
# round up text so we can bind keys to them later
all_text_widgets = []
# for the build tab (could be cleaned up)
chosen_f = []
sentdict = {}
boxes = []
buildbits = {}
most_recent_projects = []
# some variables that will get used throughout the gui
# a dict of the editor frame names and models
editor_tables = {}
currently_in_each_frame = {}
# for conc sort toggle
sort_direction = True
subc_sel_vals = []
subc_sel_vals_build = []
# store every interrogation and conc in this session
all_interrogations = OrderedDict()
all_conc = OrderedDict()
all_images = []
all_interrogations['None'] = 'None'
# corpus path setter
corpus_fullpath = StringVar()
corpus_fullpath.set('')
corenlppath = StringVar()
corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
# visualise
# where to put the current figure and frame
thefig = []
oldplotframe = []
# for visualise, this holds a list of subcorpora or entries,
# so that the title will dynamically change at the right time
single_entry_or_subcorpus = {}
# conc
# to do: more consistent use of globals!
itemcoldict = {}
current_conc = ['None']
global conc_saved
conc_saved = False
import itertools
try:
toggle = itertools.cycle([True, False]).__next__
except AttributeError:
toggle = itertools.cycle([True, False]).next
# manage pane: obsolete
manage_box = {}
# custom lists
custom_special_dict = {}
# just the ones on the hd
saved_special_dict = {}
# not currently using this sort feature---should use in conc though
import itertools
try:
direct = itertools.cycle([0,1]).__next__
except AttributeError:
direct = itertools.cycle([0,1]).next
corpus_names_and_speakers = {}
################### ################### ################### ###################
# DICTIONARIES # # DICTIONARIES # # DICTIONARIES # # DICTIONARIES #
################### ################### ################### ###################
qd = {'Subjects': r'__ >># @NP',
'Processes': r'/VB.?/ >># ( VP >+(VP) (VP !> VP $ NP))',
'Modals': r'MD < __',
'Participants': r'/(NN|PRP|JJ).?/ >># (/(NP|ADJP)/ $ VP | > VP)',
'Entities': r'NP <# NNP',
'Any': 'any'}
# concordance colours
colourdict = {1: '#fbb4ae',
2: '#b3cde3',
3: '#ccebc5',
4: '#decbe4',
5: '#fed9a6',
6: '#ffffcc',
7: '#e5d8bd',
8: '#D9DDDB',
9: '#000000',
0: '#F4F4F4'}
# translate search option for interrogator()
transdict = {
'Get distance from root for regex match': 'a',
'Get tag and word of match': 'b',
'Count matches': 'c',
'Get role of match': 'f',
'Get "role:dependent", matching governor': 'd',
'Get ngrams from tokens': 'j',
'Get "role:governor", matching dependent': 'g',
'Get lemmata matching regex': 'l',
'Get tokens by role': 'm',
'Get ngrams from trees': 'n',
'Get part-of-speech tag': 'p',
'Regular expression search': 'r',
'Get tokens matching regex': 't',
'Get stats': 'v',
'Get words': 'w',
'Get tokens by regex': 'h',
'Get tokens matching list': 'e'}
# translate sort_by for editor
sort_trans = {'None': False,
'Total': 'total',
'Inverse total': 'infreq',
'Name': 'name',
'Increase': 'increase',
'Decrease': 'decrease',
'Static': 'static',
'Turbulent': 'turbulent',
'P value': 'p',
'Reverse': 'reverse'}
# translate special queries for interrogator()
spec_quer_translate = {'Participants': 'w',
'Any': 'any',
'Processes': 'w',
'Subjects': 'w',
'Entities': 'w'}
# todo: newer method
from corpkit.constants import transshow, transobjs, LETTERS
from corpkit.process import make_name_to_query_dict
exist = {'Trees': 't', 'Stats': 'v', 'CQL': 'cql'}
convert_name_to_query = make_name_to_query_dict(exist)
# these are example queries for each data type
def_queries = {}
for i in convert_name_to_query.keys():
if i.lower().endswith('function'):
def_queries[i] = r'\b(amod|nn|advm|vmod|tmod)\b'
elif i.lower().endswith('lemma'):
def_queries[i] = r'\b(want|desire|need)\b'
elif i.lower().endswith('word class'):
def_queries[i] = r'^(ad)verb$'
elif i.lower().endswith('index'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('stats'):
def_queries[i] = r'any',
elif i.lower().endswith('cql'):
def_queries[i] = r'[pos="RB" & word=".*ly$"]',
elif i.lower().endswith('pos'):
def_queries[i] = r'^[NJR]',
elif i.lower().endswith('index'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('distance from root'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('trees'):
def_queries[i] = r'JJ > (NP <<# /NN.?/)'
else:
def_queries[i] = r'\b(m.n|wom.n|child(ren)?)\b'
################### ################### ################### ###################
# FUNCTIONS # # FUNCTIONS # # FUNCTIONS # # FUNCTIONS #
################### ################### ################### ###################
# some functions used throughout the gui
def focus_next_window(event):
    """Tab handler: move focus to the next widget, selecting its text
    when the widget supports it. Returns "break" to suppress the
    default Tk behaviour.
    """
    # hoist the lookup: the old code called tk_focusNext() twice
    nxt = event.widget.tk_focusNext()
    nxt.focus()
    try:
        # not every widget type has selection_range (e.g. non-Entry);
        # the old bare `except:` also caught KeyboardInterrupt, so
        # narrow it to Exception
        nxt.selection_range(0, END)
    except Exception:
        pass
    return "break"
def runner(button, command, conc=False):
    """
    Run *command* on behalf of *button*, then re-enable the button and
    reset the progress bar -- whether the command returned normally or
    raised. Exceptions are printed so the GUI keeps running.

    The two interrogate buttons take the conc flag; every other command
    is called with no arguments.
    """
    try:
        if button in (interrobut, interrobut_conc):
            command(conc)
        else:
            command()
    except Exception:
        import traceback
        print(traceback.format_exc())
    note.progvar.set(0)
    button.config(state=NORMAL)
def refresh_images(*args):
    """Sync the all_images list with the .png files in the images folder.

    Called via the imagewatched trace whenever matplotlib saves a figure.
    """
    import os
    if os.path.isdir(image_fullpath.get()):
        image_list = sorted(f for f in os.listdir(image_fullpath.get())
                            if f.endswith('.png'))
        for iname in image_list:
            name = iname.replace('.png', '')
            if name not in all_images:
                all_images.append(name)
    else:
        # bug fix: the old code did `for i in all_images: all_images.pop(i)`,
        # which passes a filename string to list.pop (TypeError) and
        # mutates the list while iterating it; just clear the list.
        del all_images[:]
    #refresh()
# if the dummy variable imagewatched is changed, refresh images
# this connects to matplotlib's save button, if the modified
# matplotlib is installed. a better way to do this would be good!
root.imagewatched.trace("w", refresh_images)
def timestring(input):
    """Print *input* (left-stripped) prefixed with the current HH:MM:SS."""
    from time import localtime, strftime
    stamp = strftime("%H:%M:%S", localtime())
    print('%s: %s' % (stamp, input.lstrip()))
def conmap(cnfg, section):
    """Load one ConfigParser section into a dict (helper for load settings).

    Values are coerced: '0' -> False (then 0), '1' -> True, other digit
    strings -> int, 'none' in any case -> False (then 0), and any
    remaining falsy value becomes 0. Everything else stays a string.
    """
    parsed = {}
    for option in cnfg.options(section):
        value = cnfg.get(section, option)
        if value == '0':
            value = False
        elif value == '1':
            value = True
        elif value.isdigit():
            value = int(value)
        if isinstance(value, str) and value.lower() == 'none':
            value = False
        if not value:
            value = 0
        parsed[option] = value
    return parsed
def convert_pandas_dict_to_ints(dict_obj):
    """Cast cell values of a pandas .to_dict() payload to int for display.

    tkintertable would otherwise show whole numbers as floats (1.0).
    Conversion only happens when EVERY numeric cell in the table is a
    whole number. The broad TypeError guard papers over single-corpus
    inputs whose nesting differs -- per the original note, the real
    cause should be fixed earlier.
    """
    try:
        flat = [cell for col in dict_obj.values() for cell in col.values()]
        if all(float(x).is_integer() for x in flat if is_number(x)):
            for col in dict_obj.values():
                for key, cell in list(col.items()):
                    if is_number(cell):
                        col[key] = int(cell)
    except TypeError:
        pass
    return dict_obj
def update_spreadsheet(frame_to_update, df_to_show=None, model=False,
                       height=140, width=False, indexwidth=70):
    """Redraw the tkintertable spreadsheet inside *frame_to_update*.

    frame_to_update: Tk frame holding the table; also the key into the
        editor_tables / currently_in_each_frame caches
    df_to_show: pandas DataFrame to display; if None, rebuilt from *model*
    model: an existing TableModel, or False
    height, width: table geometry (indexwidth is currently unused here)
    """
    from collections import OrderedDict
    import pandas
    # colours for tkintertable
    kwarg = {'cellbackgr': '#F7F7FA',
             'grid_color': '#c5c5c5',
             'entrybackgr': '#F4F4F4',
             'selectedcolor': 'white',
             'rowselectedcolor': '#b3cde3',
             'multipleselectioncolor': '#fbb4ae'}
    if width:
        kwarg['width'] = width
    # no DataFrame given: reconstruct one from the existing model
    if model and not df_to_show:
        df_to_show = make_df_from_model(model)
        #if need_make_totals:
        df_to_show = make_df_totals(df_to_show)
    if df_to_show is not None:
        # for abs freq, make total
        model = TableModel()
        df_to_show = pandas.DataFrame(df_to_show, dtype=object)
        #if need_make_totals(df_to_show):
        df_to_show = make_df_totals(df_to_show)
        # turn pandas into dict
        raw_data = df_to_show.to_dict()
        # convert to int if possible
        raw_data = convert_pandas_dict_to_ints(raw_data)
        table = TableCanvas(frame_to_update, model=model,
                            showkeynamesinheader=True,
                            height=height,
                            rowheaderwidth=row_label_width.get(), cellwidth=cell_width.get(), **kwarg)
        table.createTableFrame()
        model = table.model
        model.importDict(raw_data)
        # move columns into correct positions
        for index, name in enumerate(list(df_to_show.index)):
            model.moveColumn(model.getColumnIndex(name), index)
        table.createTableFrame()
        # sort the rows: honour the stored ordering column, then drop it
        if 'tkintertable-order' in list(df_to_show.index):
            table.sortTable(columnName = 'tkintertable-order')
            ind = model.columnNames.index('tkintertable-order')
            try:
                model.deleteColumn(ind)
            except:
                # NOTE(review): bare except kept from original -- deleting
                # the helper column is best-effort
                pass
        if 'Total' in list(df_to_show.index):
            table.sortTable(columnName='Total', reverse=True)
        elif len(df_to_show.index) == 1:
            table.sortTable(columnIndex=0, reverse=True)
        else:
            #nm = os.path.basename(corpus_fullpath.get().rstrip('/'))
            # fall back to sorting on the last column
            ind = len(df_to_show.columns) - 1
            table.sortTable(columnIndex = ind, reverse = 1)
            #pass
        table.redrawTable()
        # cache the model and df so later edits can find them
        editor_tables[frame_to_update] = model
        currently_in_each_frame[frame_to_update] = df_to_show
        return
    # model given but no df: just redraw the model as-is
    if model:
        table = TableCanvas(frame_to_update, model=model,
                            showkeynamesinheader=True,
                            height=height,
                            rowheaderwidth=row_label_width.get(), cellwidth=cell_width.get(),
                            **kwarg)
        table.createTableFrame()
        try:
            table.sortTable(columnName = 'Total', reverse = direct())
        except:
            # NOTE(review): bare except kept from original; direct() is
            # consumed once so the toggle stays in step
            direct()
            table.sortTable(reverse = direct())
        table.createTableFrame()
        table.redrawTable()
    else:
        # nothing to show: draw an empty table
        table = TableCanvas(frame_to_update, height=height, cellwidth=cell_width.get(),
                            showkeynamesinheader=True, rowheaderwidth=row_label_width.get(), **kwarg)
        table.createTableFrame() # sorts by total freq, ok for now
        table.redrawTable()
from corpkit.cql import remake_special
def ignore():
    """Event handler used when buttons should do nothing.

    Returning "break" stops Tk from running the default binding.
    """
    return "break"
def need_make_totals(df):
    """Return True when *df* looks like an absolute-frequency table that
    should get a 'Total' row: at least three rows, and every numeric
    value in the first row is a whole number.
    """
    if len(df.index) < 3:
        return False
    try:
        # probe that the frame is indexable at all
        df.iloc[0, 0]
    except Exception:
        # narrowed from a bare except: anything unindexable means no totals
        return False
    # if it was a Series-like frame, .values may not iterate: treat as no
    try:
        vals = [i for i in list(df.iloc[0, ].values) if is_number(i)]
    except TypeError:
        return False
    if not vals:
        return False
    return all(float(x).is_integer() for x in vals)
def make_df_totals(df):
    """Append a 'Total' row (column sums) to *df*, replacing any existing
    one. The helper 'tkintertable-order' row is excluded from the sums.
    """
    df = df.drop('Total', errors='ignore')
    # add new totals; .loc replaces the long-deprecated (and since
    # removed) DataFrame.ix indexer
    df.loc['Total'] = df.drop('tkintertable-order', errors='ignore').sum().astype(object)
    return df
def make_df_from_model(model):
    """Rebuild a pandas DataFrame from a tkintertable model (i.e. the
    current spreadsheet contents), restoring ordering and totals rows.
    """
    import pandas
    from io import StringIO
    recs = model.getAllCells()
    colnames = model.columnNames
    collabels = model.columnlabels
    # header row: the visible column labels
    # (on Python 3, str(s, errors='ignore') only worked for bytes and
    # raised TypeError otherwise, always falling through to str(s) --
    # so plain str(s) preserves the effective behaviour)
    header = [str(collabels[c]) for c in colnames]
    csv_data = [','.join(header)]
    for rowkey in list(recs.keys()):
        rowname = model.getRecName(rowkey)
        csv_data.append(','.join([str(rowname)] + [str(s) for s in recs[rowkey]]))
    csv = '\n'.join(csv_data)
    # Python 2 needs an explicit decode; on Python 3 `unicode` does not
    # exist (the old unconditional call was a NameError there)
    try:
        csv = unicode(csv, errors='ignore')
    except NameError:
        pass
    newdata = pandas.read_csv(StringIO(csv), index_col=0, header=0)
    newdata = pandas.DataFrame(newdata, dtype=object)
    # the model stores the table transposed relative to the df
    newdata = newdata.T
    newdata = newdata.drop('Total', errors='ignore')
    newdata = add_tkt_index(newdata)
    if need_make_totals(newdata):
        newdata = make_df_totals(newdata)
    return newdata
def color_saved(lb, savepath=False, colour1='#D9DDDB', colour2='white',
                ext='.p', lists=False):
    """make saved items in listbox have colour background

    lb: listbox to colour
    savepath: where to look for existing files
    colour1, colour2: what to colour found and not found
    ext: what to append to filenames when searching for them
    lists: if working with wordlists, things need to be done differently, more colours"""
    all_items = [lb.get(i) for i in range(len(lb.get(0, END)))]
    # define colours for permanent lists in wordlists
    if lists:
        colour3 = '#ffffcc'
        colour4 = '#fed9a6'
    for index, item in enumerate(all_items):
        # check if saved
        if not lists:
            # files or directories with current corpus in name or without
            newn = current_corpus.get() + '-' + urlify(item) + ext
            a = os.path.isfile(os.path.join(savepath, urlify(item) + ext))
            b = os.path.isdir(os.path.join(savepath, urlify(item)))
            c = os.path.isfile(os.path.join(savepath, newn))
            d = os.path.isdir(os.path.join(savepath, newn))
            if any(x for x in [a, b, c, d]):
                issaved = True
            else:
                issaved = False
        # for lists, check if permanently stored
        else:
            issaved = False
            if item in list(saved_special_dict.keys()):
                issaved = True
            if current_corpus.get() + '-' + item in list(saved_special_dict.keys()):
                issaved = True
        if issaved:
            lb.itemconfig(index, {'bg':colour1})
        else:
            lb.itemconfig(index, {'bg':colour2})
        # permanent wordlists get their own colours, overriding the above
        if lists:
            if item in list(predict.keys()):
                if item.endswith('_ROLE'):
                    lb.itemconfig(index, {'bg':colour3})
                else:
                    lb.itemconfig(index, {'bg':colour4})
    lb.selection_clear(0, END)
def paste_into_textwidget(*args):
"""paste function for widgets ... doesn't seem to work as expected"""
try:
start = args[0].widget.index("sel.first")
end = args[0].widget.index("sel.last")
args[0].widget.delete(start, end)
except TclError as e:
# nothing was selected, so paste doesn't need
# to delete anything
pass
# for some reason, this works with the error.
try:
args[0].widget.insert("insert", clipboard.rstrip('\n'))
except NameError:
pass
def copy_from_textwidget(*args):
    """Copy the current selection onto the clipboard."""
    widget = args[0].widget
    #widget.clipboard_clear()
    selected = widget.get("sel.first", "sel.last").rstrip('\n')
    widget.clipboard_append(selected)
def cut_from_textwidget(*args):
    """Cut the current selection, placing its text on the clipboard."""
    widget = args[0].widget
    selected = widget.get("sel.first", "sel.last")
    widget.clipboard_append(selected)
    widget.delete("sel.first", "sel.last")
def select_all_text(*args):
    """Select all content in the widget that fired the event.

    Entry-style widgets support selection_range(); Text-style widgets
    need a 'sel' tag over the whole buffer instead.
    """
    try:
        args[0].widget.selection_range(0, END)
    # narrowed from a bare except so Ctrl-C/SystemExit are not swallowed
    except Exception:
        args[0].widget.tag_add("sel", "1.0", "end")
def make_corpus_name_from_abs(pfp, cfp):
    """Strip project path *pfp* from corpus path *cfp*, when present,
    leaving a project-relative corpus name; otherwise return *cfp*."""
    if pfp not in cfp:
        return cfp
    return cfp.replace(pfp.rstrip('/') + '/', '')
def get_all_corpora():
    """Return every corpus directory under the project's corpora dir,
    as sorted paths relative to that directory."""
    import os
    base = corpora_fullpath.get()
    found = []
    for root, dirnames, _ in os.walk(base):
        for dirname in dirnames:
            full = os.path.join(root, dirname)
            found.append(full.replace(base, '', 1).lstrip('/'))
    return sorted(found)
def update_available_corpora(delete=False):
    """updates corpora in project, and returns a list of them

    delete: when True, just empty both corpus menus instead of
    repopulating them."""
    import os
    fp = corpora_fullpath.get()
    all_corpora = get_all_corpora()
    # the interrogate menu and the build menu are refreshed together
    for om in [available_corpora, available_corpora_build]:
        om.config(state=NORMAL)
        om['menu'].delete(0, 'end')
        if not delete:
            for corp in all_corpora:
                # the interrogate menu only lists parsed/tokenised corpora
                if not corp.endswith('parsed') and not corp.endswith('tokenised') and om == available_corpora:
                    continue
                om['menu'].add_command(label=corp, command=_setit(current_corpus, corp))
    return all_corpora
def refresh():
    """refreshes the list of dataframes in the editor and plotter panes

    Rebuilds the three OptionMenus (edit source, edit branch, plot
    source) from all_interrogations, adds any project dictionaries to
    the second edit menu, and refreshes the saved-concordance listbox."""
    import os
    # Reset name_of_o_ed_spread and delete all old options
    # get the latest only after first interrogation
    if len(list(all_interrogations.keys())) == 1:
        selected_to_edit.set(list(all_interrogations.keys())[-1])
    dataframe1s['menu'].delete(0, 'end')
    dataframe2s['menu'].delete(0, 'end')
    every_interrogation['menu'].delete(0, 'end')
    #every_interro_listbox.delete(0, 'end')
    #every_image_listbox.delete(0, 'end')
    new_choices = []
    for interro in list(all_interrogations.keys()):
        new_choices.append(interro)
    new_choices = tuple(new_choices)
    dataframe2s['menu'].add_command(label='Self', command=_setit(data2_pick, 'Self'))
    # project dictionaries (.p files) can also be edit targets
    if project_fullpath.get() != '' and project_fullpath.get() != rd:
        dpath = os.path.join(project_fullpath.get(), 'dictionaries')
        if os.path.isdir(dpath):
            dicts = sorted([f.replace('.p', '') for f in os.listdir(dpath) if os.path.isfile(os.path.join(dpath, f)) and f.endswith('.p')])
            for d in dicts:
                dataframe2s['menu'].add_command(label=d, command=_setit(data2_pick, d))
    for choice in new_choices:
        dataframe1s['menu'].add_command(label=choice, command=_setit(selected_to_edit, choice))
        dataframe2s['menu'].add_command(label=choice, command=_setit(data2_pick, choice))
        every_interrogation['menu'].add_command(label=choice, command=_setit(data_to_plot, choice))
    refresh_images()
    # refresh the saved-concordances listbox
    prev_conc_listbox.delete(0, 'end')
    for i in sorted(all_conc.keys()):
        prev_conc_listbox.insert(END, i)
def add_tkt_index(df):
    """Append a 'tkintertable-order' row to *df* recording the display
    order tkintertable should use for its columns."""
    import pandas
    flipped = df.T
    flipped = flipped.drop('tkintertable-order', errors = 'ignore', axis=1)
    order = [position for position, _ in enumerate(list(flipped.index))]
    flipped['tkintertable-order'] = pandas.Series(order, index = list(flipped.index))
    return flipped.T
def namer(name_box_text, type_of_data = 'interrogation'):
    """Return a storage name for an interrogation/editor result.

    An empty or 'untitled' name is replaced by e.g. 'interrogation-00',
    incrementing the counter until no stored name starts with it.
    """
    if name_box_text and name_box_text.lower() != 'untitled':
        return name_box_text
    counter = 0
    candidate = '%s-%s' % (type_of_data, str(counter).zfill(2))
    while any(key.startswith(candidate) for key in list(all_interrogations.keys())):
        counter += 1
        candidate = '%s-%s' % (type_of_data, str(counter).zfill(2))
    return candidate
def show_prev():
    """show previous interrogation

    Moves the interrogation spreadsheet back one entry in
    all_interrogations, updating the prev/next button states."""
    import pandas
    currentname = name_of_interro_spreadsheet.get()
    # get index of current index
    if not currentname:
        prev.configure(state=DISABLED)
        return
    ind = list(all_interrogations.keys()).index(currentname)
    # if it's higher than zero
    if ind > 0:
        if ind == 1:
            prev.configure(state=DISABLED)
            nex.configure(state=NORMAL)
        else:
            if ind + 1 < len(list(all_interrogations.keys())):
                nex.configure(state=NORMAL)
            prev.configure(state=NORMAL)
        newname = list(all_interrogations.keys())[ind - 1]
        newdata = all_interrogations[newname]
        name_of_interro_spreadsheet.set(newname)
        i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
        # a bare DataFrame vs an Interrogation-style object with .results/.totals
        if isinstance(newdata, pandas.DataFrame):
            toshow = newdata
            toshowt = newdata.sum()
        elif hasattr(newdata, 'results') and newdata.results is not None:
            toshow = newdata.results
            # NOTE(review): re-checks newdata.results, not newdata.totals —
            # possibly meant 'newdata.totals is not None'; confirm
            if hasattr(newdata, 'totals') and newdata.results is not None:
                toshowt = pandas.DataFrame(newdata.totals, dtype=object)
        update_spreadsheet(interro_results, toshow, height=340)
        update_spreadsheet(interro_totals, toshowt, height=10)
        refresh()
    else:
        prev.configure(state=DISABLED)
        nex.configure(state=NORMAL)
def show_next():
    """show next interrogation

    Moves the interrogation spreadsheet forward one entry in
    all_interrogations, updating the prev/next button states."""
    import pandas
    currentname = name_of_interro_spreadsheet.get()
    if currentname:
        ind = list(all_interrogations.keys()).index(currentname)
    else:
        ind = 0
    if ind > 0:
        prev.configure(state=NORMAL)
    if ind + 1 < len(list(all_interrogations.keys())):
        # disable 'next' when we are about to land on the last entry
        if ind + 2 == len(list(all_interrogations.keys())):
            nex.configure(state=DISABLED)
            prev.configure(state=NORMAL)
        else:
            nex.configure(state=NORMAL)
        newname = list(all_interrogations.keys())[ind + 1]
        newdata = all_interrogations[newname]
        name_of_interro_spreadsheet.set(newname)
        i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
        # a bare DataFrame vs an Interrogation-style object with .results/.totals
        if isinstance(newdata, pandas.DataFrame):
            toshow = newdata
            toshowt = newdata.sum()
        elif hasattr(newdata, 'results') and newdata.results is not None:
            toshow = newdata.results
            # NOTE(review): re-checks newdata.results, not newdata.totals —
            # possibly meant 'newdata.totals is not None'; confirm
            if hasattr(newdata, 'totals') and newdata.results is not None:
                toshowt = newdata.totals
        update_spreadsheet(interro_results, toshow, height=340)
        # NOTE(review): totals_as_df is built but never used; the raw
        # toshowt is passed below instead — confirm intent
        totals_as_df = pandas.DataFrame(toshowt, dtype=object)
        update_spreadsheet(interro_totals, toshowt, height=10)
        refresh()
    else:
        nex.configure(state=DISABLED)
        prev.configure(state=NORMAL)
def exchange_interro_branch(namedtupname, newdata, branch='results'):
    """replaces a namedtuple results/totals with newdata
    --- such a hack, should upgrade to recordtype

    The existing DataFrame/Series is emptied and refilled in place
    (rather than reassigned) because the enclosing namedtuple is
    immutable.
    """
    namedtup = all_interrogations[namedtupname]
    the_branch = getattr(namedtup, branch)
    if branch == 'results':
        the_branch.drop(the_branch.index, inplace=True)
        the_branch.drop(the_branch.columns, axis=1, inplace=True)
        for i in list(newdata.columns):
            the_branch[i] = i
        for index, i in enumerate(list(newdata.index)):
            # .iloc replaces the removed positional .ix indexer
            the_branch.loc[i] = newdata.iloc[index]
    elif branch == 'totals':
        the_branch.drop(the_branch.index, inplace=True)
        for index, datum in zip(newdata.index, newdata.iloc[:,0].values):
            # .at[...] = ... replaces the removed Series.set_value()
            the_branch.at[index] = datum
    all_interrogations[namedtupname] = namedtup
def update_interrogation(table_id, id, is_total=False):
    """takes any changes made to spreadsheet and saves to the interrogation
    id: 0 = interrogator
        1 = old editor window
        2 = new editor window"""
    # rebuild a DataFrame from the edited tkintertable model
    model=editor_tables[table_id]
    newdata = make_df_from_model(model)
    if need_make_totals(newdata):
        newdata = make_df_totals(newdata)
    # pick which stored interrogation this spreadsheet belongs to
    if id == 0:
        name_of_interrogation = name_of_interro_spreadsheet.get()
    if id == 1:
        name_of_interrogation = name_of_o_ed_spread.get()
    if id == 2:
        name_of_interrogation = name_of_n_ed_spread.get()
    # write the edited frame back into the stored interrogation object
    if not is_total:
        exchange_interro_branch(name_of_interrogation, newdata, branch='results')
    else:
        exchange_interro_branch(name_of_interrogation, newdata, branch='totals')
def update_all_interrogations(pane='interrogate'):
    import pandas
    """update all_interrogations within spreadsheet data
    need a very serious cleanup!"""
    # to do: only if they are there!
    # first, push spreadsheet edits back into the stored interrogations
    if pane == 'interrogate':
        update_interrogation(interro_results, id=0)
        update_interrogation(interro_totals, id=0, is_total=True)
    if pane == 'edit':
        update_interrogation(o_editor_results, id=1)
        update_interrogation(o_editor_totals, id=1, is_total=True)
        # update new editor sheet if it's there
        if name_of_n_ed_spread.get() != '':
            update_interrogation(n_editor_results, id=2)
            update_interrogation(n_editor_totals, id=2, is_total=True)
    timestring('Updated interrogations with manual data.')
    # then, redraw the affected spreadsheets from the stored data
    if pane == 'interrogate':
        the_data = all_interrogations[name_of_interro_spreadsheet.get()]
        tot = pandas.DataFrame(the_data.totals, dtype=object)
        if the_data.results is not None:
            update_spreadsheet(interro_results, the_data.results, height=340)
        else:
            update_spreadsheet(interro_results, df_to_show=None, height=340)
        update_spreadsheet(interro_totals, tot, height=10)
    if pane == 'edit':
        the_data = all_interrogations[name_of_o_ed_spread.get()]
        there_is_new_data = False
        try:
            newdata = all_interrogations[name_of_n_ed_spread.get()]
            there_is_new_data = True
        except:
            pass
        if the_data.results is not None:
            update_spreadsheet(o_editor_results, the_data.results, height=140)
        update_spreadsheet(o_editor_totals, pandas.DataFrame(the_data.totals, dtype=object), height=10)
        if there_is_new_data:
            if newdata != 'None' and newdata != '':
                if the_data.results is not None:
                    update_spreadsheet(n_editor_results, newdata.results, height=140)
                update_spreadsheet(n_editor_totals, pandas.DataFrame(newdata.totals, dtype=object), height=10)
        # keep the interrogate pane in sync when editing the same data
        if name_of_o_ed_spread.get() == name_of_interro_spreadsheet.get():
            the_data = all_interrogations[name_of_interro_spreadsheet.get()]
            tot = pandas.DataFrame(the_data.totals, dtype=object)
            if the_data.results is not None:
                update_spreadsheet(interro_results, the_data.results, height=340)
            update_spreadsheet(interro_totals, tot, height=10)
            timestring('Updated spreadsheet display in edit window.')
from corpkit.process import is_number
################### ################### ################### ###################
#PREFERENCES POPUP# #PREFERENCES POPUP# #PREFERENCES POPUP# #PREFERENCES POPUP#
################### ################### ################### ###################
# make variables with default values
# whether to check for new versions automatically
do_auto_update = IntVar()
do_auto_update.set(1)
do_auto_update_this_session = IntVar()
do_auto_update_this_session.set(1)
#conc_when_int = IntVar()
#conc_when_int.set(1)
# concordance formatting: format only the match (0) or context too
only_format_match = IntVar()
only_format_match.set(0)
files_as_subcorpora = IntVar()
files_as_subcorpora.set(0)
do_concordancing = IntVar()
do_concordancing.set(1)
show_conc_metadata = IntVar()
show_conc_metadata.set(1)
#noregex = IntVar()
#noregex.set(0)
# memory (MB) handed to the CoreNLP parser
parser_memory = StringVar()
parser_memory.set(str(2000))
# display limits for concordances and spreadsheets
truncate_conc_after = IntVar()
truncate_conc_after.set(9999)
truncate_spreadsheet_after = IntVar()
truncate_spreadsheet_after.set(9999)
corenlppath = StringVar()
corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
# spreadsheet geometry
row_label_width=IntVar()
row_label_width.set(100)
cell_width=IntVar()
cell_width.set(50)
# p value threshold
p_val = DoubleVar()
p_val.set(0.05)
# a place for the toplevel entry info
entryboxes = OrderedDict()
# fill it with null data
for i in range(10):
    tmp = StringVar()
    tmp.set('')
    entryboxes[i] = tmp
def preferences_popup():
    """Open the Preferences window for tool-wide settings.

    Entries/checkbuttons are bound to the module-level preference
    variables; 'Done' saves them via save_tool_prefs()."""
    # close any existing toplevel first
    try:
        global toplevel
        toplevel.destroy()
    except:
        pass
    from tkinter import Toplevel
    pref_pop = Toplevel()
    #pref_pop.config(background = '#F4F4F4')
    pref_pop.geometry('+300+100')
    pref_pop.title("Preferences")
    #pref_pop.overrideredirect(1)
    pref_pop.wm_attributes('-topmost', 1)
    Label(pref_pop, text='').grid(row=0, column=0, pady=2)
    def quit_coding(*args):
        # persist the settings, then close the window
        save_tool_prefs(printout=True)
        pref_pop.destroy()
    tmp = Checkbutton(pref_pop, text='Automatically check for updates', variable=do_auto_update, onvalue=1, offvalue=0)
    if do_auto_update.get() == 1:
        tmp.select()
    all_text_widgets.append(tmp)
    tmp.grid(row=0, column=0, sticky=W)
    Label(pref_pop, text='Truncate concordance lines').grid(row=1, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=truncate_conc_after, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=1, column=1, sticky=E)
    Label(pref_pop, text='Truncate spreadsheets').grid(row=2, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=truncate_spreadsheet_after, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=2, column=1, sticky=E)
    Label(pref_pop, text='CoreNLP memory allocation (MB)').grid(row=3, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=parser_memory, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=3, column=1, sticky=E)
    Label(pref_pop, text='Spreadsheet cell width').grid(row=4, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=cell_width, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=4, column=1, sticky=E)
    Label(pref_pop, text='Spreadsheet row header width').grid(row=5, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=row_label_width, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=5, column=1, sticky=E)
    Label(pref_pop, text='P value').grid(row=6, column=0, sticky=W)
    tmp = Entry(pref_pop, textvariable=p_val, width=7)
    all_text_widgets.append(tmp)
    tmp.grid(row=6, column=1, sticky=E)
    Label(pref_pop, text='CoreNLP path:', justify=LEFT).grid(row=7, column=0, sticky=W, rowspan = 1)
    Button(pref_pop, text='Change', command=set_corenlp_path, width =5).grid(row=7, column=1, sticky=E)
    Label(pref_pop, textvariable=corenlppath, justify=LEFT).grid(row=8, column=0, sticky=W)
    #set_corenlp_path
    tmp = Checkbutton(pref_pop, text='Treat files as subcorpora', variable=files_as_subcorpora, onvalue=1, offvalue=0)
    tmp.grid(row=10, column=0, pady=(0,0), sticky=W)
    #tmp = Checkbutton(pref_pop, text='Disable regex for plaintext', variable=noregex, onvalue=1, offvalue=0)
    #tmp.grid(row=9, column=1, pady=(0,0), sticky=W)
    tmp = Checkbutton(pref_pop, text='Do concordancing', variable=do_concordancing, onvalue=1, offvalue=0)
    tmp.grid(row=10, column=1, pady=(0,0), sticky=W)
    tmp = Checkbutton(pref_pop, text='Format concordance context', variable=only_format_match, onvalue=1, offvalue=0)
    tmp.grid(row=11, column=0, pady=(0,0), sticky=W)
    tmp = Checkbutton(pref_pop, text='Show concordance metadata', variable=show_conc_metadata, onvalue=1, offvalue=0)
    tmp.grid(row=11, column=1, pady=(0,0), sticky=W)
    stopbut = Button(pref_pop, text='Done', command=quit_coding)
    stopbut.grid(row=12, column=0, columnspan=2, pady=15)
    pref_pop.bind("<Return>", quit_coding)
    pref_pop.bind("<Tab>", focus_next_window)
################### ################### ################### ###################
# INTERROGATE TAB # # INTERROGATE TAB # # INTERROGATE TAB # # INTERROGATE TAB #
################### ################### ################### ###################
# container frame for all interrogate-tab options
# hopefully weighting the two columns, not sure if works
interro_opt = Frame(tab1)
interro_opt.grid(row=0, column=0)
tab1.grid_columnconfigure(2, weight=5)
def do_interrogation(conc=True):
    """the main function: calls interrogator()

    Gathers every option from the GUI (query, search type, exclusions,
    metadata-based subcorpora, n-gram/collocation sizes), runs
    interrogator() over the current corpus, then loads results, totals
    and concordance lines into the interface and updates button states.
    """
    import pandas
    from corpkit.interrogator import interrogator
    from corpkit.interrogation import Interrogation, Interrodict
    doing_concondancing = True
    # no pressing while running
    #if not conc:
    interrobut.config(state=DISABLED)
    #else:
    interrobut_conc.config(state=DISABLED)
    recalc_but.config(state=DISABLED)
    # progbar to zero
    note.progvar.set(0)
    for i in list(itemcoldict.keys()):
        del itemcoldict[i]
    # spelling conversion?
    #conv = (spl.var).get()
    #if conv == 'Convert spelling' or conv == 'Off':
    #    conv = False
    # lemmatag: do i need to add as button if trees?
    lemmatag = False
    query = qa.get(1.0, END).replace('\n', '')
    if not datatype_picked.get() == 'CQL':
        # allow list queries
        if query.startswith('[') and query.endswith(']') and ',' in query:
            query = query.lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
        #elif transdict[searchtype()] in ['e', 's']:
        #    query = query.lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
        else:
            # convert special stuff
            query = remake_special(query, customs=custom_special_dict,
                                   case_sensitive=case_sensitive.get())
            if query is False:
                return
    # make name for interrogation
    the_name = namer(nametext.get(), type_of_data='interrogation')
    cqlmode = IntVar()
    cqlmode.set(0)
    # get the main query
    so = datatype_picked.get()
    if so == 'CQL':
        cqlmode.set(1)
    selected_option = convert_name_to_query.get(so, so)
    if selected_option == '':
        timestring('You need to select a search type.')
        return
    queryd = {}
    for k, v in list(additional_criteria.items()):
        # this should already be done
        queryd[k] = v
    queryd[selected_option] = query
    # cql mode just takes a string
    if cqlmode.get():
        queryd = query
    if selected_option == 'v':
        queryd = 'features'
        doing_concondancing = False
    else:
        doing_concondancing = True
    # to do: make this order customisable for the gui too
    poss_returns = [return_function, return_pos, return_lemma, return_token, \
                    return_gov, return_dep, return_tree, return_index, return_distance, \
                    return_count, return_gov_lemma, return_gov_pos, return_gov_func, \
                    return_dep_lemma, return_dep_pos, return_dep_func, \
                    return_ngm_lemma, return_ngm_pos, return_ngm_func, return_ngm]
    must_make = [return_ngm_lemma, return_ngm_pos, return_ngm_func, return_ngm]
    to_show = [prenext_pos.get() + i.get() if i in must_make and i.get() else i.get() for i in poss_returns]
    to_show = [i for i in to_show if i and 'Position' not in i]
    if not to_show and not selected_option == 'v':
        timestring('Interrogation must return something.')
        return
    if 'c' in to_show:
        doing_concondancing = False
    if not do_concordancing.get():
        doing_concondancing = False
    #if noregex.get() == 1:
    #    regex = False
    #else:
    #    regex = True
    # work out subcorpora / metadata filtering from the two listboxes
    subcc = False
    just_subc = False
    met_field_ids = [by_met_listbox.get(i) for i in by_met_listbox.curselection()]
    met_val_ids = [speaker_listbox.get(i) for i in speaker_listbox.curselection()]
    if len(met_field_ids) == 1:
        met_field_ids = met_field_ids[0]
    if len(met_val_ids) == 1:
        met_val_ids = met_val_ids[0]
    if met_field_ids and not met_val_ids:
        if isinstance(met_field_ids, list):
            subcc = met_field_ids
        else:
            if met_field_ids == 'folders':
                subcc = False
            elif met_field_ids == 'files':
                files_as_subcorpora.set(1)
            elif met_field_ids == 'none':
                # todo: no sub mode?
                subcc = False
            else:
                subcc = met_field_ids
    elif not met_field_ids:
        subcc = False
    elif met_field_ids and met_val_ids:
        subcc = met_field_ids
        if 'ALL' in met_val_ids:
            pass
        else:
            just_subc = {met_field_ids: met_val_ids}
    # default interrogator args: root and note pass the gui itself for updating
    # progress bar and so on.
    interrogator_args = {'search': queryd,
                         'show': to_show,
                         'case_sensitive': bool(case_sensitive.get()),
                         'no_punct': bool(no_punct.get()),
                         #'spelling': conv,
                         'root': root,
                         'note': note,
                         'df1_always_df': True,
                         'conc': doing_concondancing,
                         'only_format_match': not bool(only_format_match.get()),
                         #'dep_type': depdict.get(kind_of_dep.get(), 'CC-processed'),
                         'nltk_data_path': nltk_data_path,
                         #'regex': regex,
                         'coref': coref.get(),
                         'cql': cqlmode.get(),
                         'files_as_subcorpora': bool(files_as_subcorpora.get()),
                         'subcorpora': subcc,
                         'just_metadata': just_subc,
                         'show_conc_metadata': bool(show_conc_metadata.get()),
                         'use_interrodict': True}
    if debug:
        print(interrogator_args)
    # build the exclude dict from the extra-criteria popup and entry
    excludes = {}
    for k, v in list(ex_additional_criteria.items()):
        if k != 'None':
            excludes[k.lower()[0]] = v
    if exclude_op.get() != 'None':
        q = remake_special(exclude_str.get(), return_list=True,
                           customs=custom_special_dict,
                           case_sensitive=case_sensitive.get())
        if q:
            excludes[exclude_op.get().lower()[0]] = q
    if excludes:
        interrogator_args['exclude'] = excludes
    try:
        interrogator_args['searchmode'] = anyall.get()
    except:
        pass
    try:
        interrogator_args['excludemode'] = excludemode.get()
    except:
        pass
    # translate lemmatag
    tagdict = {'Noun': 'n',
               'Adjective': 'a',
               'Verb': 'v',
               'Adverb': 'r',
               'None': False,
               '': False,
               'Off': False}
    #interrogator_args['lemmatag'] = tagdict[lemtag.get()]
    if corpus_fullpath.get() == '':
        timestring('You need to select a corpus.')
        return
    # stats preset is actually a search type
    #if special_queries.get() == 'Stats':
    #    selected_option = 'v'
    #    interrogator_args['query'] = 'any'
    # if ngramming, there are two extra options
    ngm = ngmsize.var.get()
    if ngm != 'Size':
        interrogator_args['gramsize'] = int(ngm)
    clc = collosize.var.get()
    if clc != 'Size':
        interrogator_args['window'] = int(clc)
    #if subc_pick.get() == "Subcorpus" or subc_pick.get().lower() == 'all' or \
    #    selected_corpus_has_no_subcorpora.get() == 1:
    corp_to_search = corpus_fullpath.get()
    #else:
    #    corp_to_search = os.path.join(corpus_fullpath.get(), subc_pick.get())
    # do interrogation, return if empty
    if debug:
        print('CORPUS:', corp_to_search)
    interrodata = interrogator(corp_to_search, **interrogator_args)
    if isinstance(interrodata, Interrogation):
        if hasattr(interrodata, 'results') and interrodata.results is not None:
            if interrodata.results.empty:
                timestring('No results found, sorry.')
                return
    # make sure we're redirecting stdout again
    if not debug:
        sys.stdout = note.redir
    # update spreadsheets
    if not isinstance(interrodata, (Interrogation, Interrodict)):
        update_spreadsheet(interro_results, df_to_show=None, height=340)
        update_spreadsheet(interro_totals, df_to_show=None, height=10)
        return
    # make non-dict results into dict, so we can iterate no matter
    # if there were multiple results or not
    interrogation_returned_dict = False
    from collections import OrderedDict
    if isinstance(interrodata, Interrogation):
        dict_of_results = OrderedDict({the_name: interrodata})
    else:
        dict_of_results = interrodata
        interrogation_returned_dict = True
    # remove dummy entry from master
    all_interrogations.pop('None', None)
    # post-process each result and add to master list
    for nm, r in sorted(dict_of_results.items(), key=lambda x: x[0]):
        # drop over 9999
        # type check probably redundant now
        if r.results is not None:
            large = [n for i, n in enumerate(list(r.results.columns)) if i > truncate_spreadsheet_after.get()]
            r.results.drop(large, axis=1, inplace=True)
            r.results.drop('Total', errors='ignore', inplace=True)
            r.results.drop('Total', errors='ignore', inplace=True, axis=1)
        # add interrogation to master list
        if interrogation_returned_dict:
            all_interrogations[the_name + '-' + nm] = r
            all_conc[the_name + '-' + nm] = r.concordance
            dict_of_results[the_name + '-' + nm] = dict_of_results.pop(nm)
            # make multi for conc...
        else:
            all_interrogations[nm] = r
            all_conc[nm] = r.concordance
    # show most recent (alphabetically last) interrogation spreadsheet
    recent_interrogation_name = list(dict_of_results.keys())[0]
    recent_interrogation_data = list(dict_of_results.values())[0]
    if queryd == {'v': 'any'}:
        conc = False
    if doing_concondancing:
        conc_to_show = recent_interrogation_data.concordance
        if conc_to_show is not None:
            numresults = len(conc_to_show.index)
            # offer truncation when the concordance is very long
            if numresults > truncate_conc_after.get() - 1:
                nums = str(numresults)
                if numresults == 9999:
                    nums += '+'
                truncate = messagebox.askyesno("Long results list",
                                               "%s unique concordance results! Truncate to %s?" % (nums, str(truncate_conc_after.get())))
                if truncate:
                    conc_to_show = conc_to_show.head(truncate_conc_after.get())
            add_conc_lines_to_window(conc_to_show, preserve_colour=False)
        else:
            timestring('No concordance results generated.')
            global conc_saved
            conc_saved = False
    name_of_interro_spreadsheet.set(recent_interrogation_name)
    i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
    # total in a way that tkintertable likes
    if isinstance(recent_interrogation_data.totals, int):
        recent_interrogation_data.totals = Series(recent_interrogation_data.totals)
    totals_as_df = pandas.DataFrame(recent_interrogation_data.totals, dtype=object)
    # update spreadsheets
    if recent_interrogation_data.results is not None:
        update_spreadsheet(interro_results, recent_interrogation_data.results, height=340)
    else:
        update_spreadsheet(interro_results, df_to_show=None, height=340)
    update_spreadsheet(interro_totals, totals_as_df, height=10)
    # set prev/next button states for the new position in the history
    ind = list(all_interrogations.keys()).index(name_of_interro_spreadsheet.get())
    if ind == 0:
        prev.configure(state=DISABLED)
    else:
        prev.configure(state=NORMAL)
    if ind + 1 == len(list(all_interrogations.keys())):
        nex.configure(state=DISABLED)
    else:
        nex.configure(state=NORMAL)
    refresh()
    # populate the subcorpus listbox from the result index
    if recent_interrogation_data.results is not None:
        subs = r.results.index
    else:
        subs = r.totals.index
    subc_listbox.delete(0, 'end')
    for e in list(subs):
        if e != 'tkintertable-order':
            subc_listbox.insert(END, e)
    #reset name
    nametext.set('untitled')
    if interrogation_returned_dict:
        timestring('Interrogation finished, with multiple results.')
    # re-enable the buttons disabled at the top
    interrobut.config(state=NORMAL)
    interrobut_conc.config(state=NORMAL)
    recalc_but.config(state=NORMAL)
class MyOptionMenu(OptionMenu):
    """Simple OptionMenu for things that don't change.

    Wraps its own StringVar; read the selection via instance.var.get().
    """
    def __init__(self, tab1, status, *options):
        # the backing variable, preset to *status*
        self.var = StringVar(tab1)
        self.var.set(status)
        OptionMenu.__init__(self, tab1, self.var, *options)
        self.config(font=('calibri',(12)),width=20)
        self['menu'].config(font=('calibri',(10)))
def corpus_callback(*args):
    """
    On selecting a corpus, set everything appropriately.
    also, disable some kinds of search based on the name
    """
    if not current_corpus.get():
        return
    import os
    from os.path import join, isdir, isfile, exists
    corpus_fullpath.set(join(corpora_fullpath.get(), current_corpus.get()))
    fp = corpus_fullpath.get()
    from corpkit.corpus import Corpus
    corpus = Corpus(fp, print_info=False)
    dtype = corpus.datatype
    cols = []
    if dtype == 'conll':
        datatype_picked.set('Word')
        try:
            cols = corpus.metadata['columns']
        except KeyError:
            pass
    try:
        subdrs = sorted([d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))])
    except FileNotFoundError:
        subdrs = []
    # no subfolders: bar chart is the only sensible default
    if len(subdrs) == 0:
        charttype.set('bar')
    pick_a_datatype['menu'].delete(0, 'end')
    path_to_new_unparsed_corpus.set(fp)
    #add_corpus_button.set('Added: "%s"' % os.path.basename(fp))
    # why is it setting itself?
    #current_corpus.set(os.path.basename(fp))
    from corpkit.process import make_name_to_query_dict
    exist = {'CQL': 'cql'}
    if 'f' in cols:
        exist['Trees'] = 't'
        exist['Stats'] = 'v'
    # todo: only cql for tokenised
    # NOTE(review): this assignment is local to corpus_callback (no
    # 'global' statement) — it shadows rather than updates the
    # module-level convert_name_to_query used elsewhere; confirm intent
    convert_name_to_query = make_name_to_query_dict(exist, cols, dtype)
    # allow tokenising/parsing of plaintext
    if not fp.endswith('-parsed') and not fp.endswith('-tokenised'):
        parsebut.config(state=NORMAL)
        tokbut.config(state=NORMAL)
        parse_button_text.set('Parse: %s' % os.path.basename(fp))
        tokenise_button_text.set('Tokenise: %s' % current_corpus.get())
    # disable tokenising and parsing of non plaintxt
    else:
        parsebut.config(state=NORMAL)
        tokbut.config(state=NORMAL)
        parse_button_text.set('Parse corpus')
        tokenise_button_text.set('Tokenise corpus')
        parsebut.config(state=DISABLED)
        tokbut.config(state=DISABLED)
    # no corefs
    if not fp.endswith('-parsed') and not fp.endswith('tokenised'):
        #pick_dep_type.config(state=DISABLED)
        coref_but.config(state=DISABLED)
        #parsebut.config(state=NORMAL)
        #speakcheck_build.config(state=NORMAL)
        interrobut_conc.config(state=DISABLED)
        recalc_but.config(state=DISABLED)
        #sensplitbut.config(state=NORMAL)
        pick_a_datatype.configure(state=DISABLED)
        interrobut.configure(state=DISABLED)
        interrobut_conc.config(state=DISABLED)
        recalc_but.config(state=DISABLED)
    else:
        interrobut_conc.config(state=NORMAL)
        recalc_but.config(state=NORMAL)
        pick_a_datatype.configure(state=NORMAL)
        interrobut.configure(state=NORMAL)
        if datatype_picked.get() not in ['Trees']:
            coref_but.config(state=NORMAL)
            interrobut_conc.config(state=DISABLED)
            recalc_but.config(state=DISABLED)
        # rebuild the search-type menu from the available columns
        for i in sorted(convert_name_to_query):
            # todo: for now --- simplifying gui!
            if i.lower() == 'distance from root' or i.lower().startswith('head'):
                continue
            pick_a_datatype['menu'].add_command(label=i, command=_setit(datatype_picked, i))
        #parsebut.config(state=DISABLED)
        #speakcheck_build.config(state=DISABLED)
        datatype_picked.set('Word')
    if not fp.endswith('-tokenised') and not fp.endswith('-parsed'):
        pick_a_datatype['menu'].add_command(label='Word', command=_setit(datatype_picked, 'Word'))
    else:
        datatype_picked.set('Word')
    add_subcorpora_to_build_box(fp)
    note.progvar.set(0)
    if current_corpus.get() in list(corpus_names_and_speakers.keys()):
        refresh_by_metadata()
        #speakcheck.config(state=NORMAL)
    else:
        pass
        #speakcheck.config(state=DISABLED)
    timestring('Set corpus directory: "%s"' % fp)
    editf.set('Edit file: ')
    # which 'show' checkbuttons make sense for this corpus type
    parse_only = [ck4, ck5, ck6, ck7, ck9, ck10, ck11, ck12, ck13, ck14, ck15, ck16]
    non_parsed = [ck1, ck8]
    if 'l' in cols:
        non_parsed.append(ck2)
    if 'p' in cols:
        non_parsed.append(ck3)
    if not current_corpus.get().endswith('-parsed'):
        for but in parse_only:
            desel_and_turn_off(but)
        for but in non_parsed:
            turnon(but)
    else:
        for but in parse_only:
            turnon(but)
        for but in non_parsed:
            turnon(but)
    if datatype_picked.get() == 'Trees':
        ck4.config(state=NORMAL)
    else:
        ck4.config(state=DISABLED)
    refresh_by_metadata()
# corpus selector menus: one on the interrogate tab, one on the build tab,
# both driven by the same current_corpus variable
Label(interro_opt, text='Corpus/subcorpora:').grid(row=0, column=0, sticky=W)
current_corpus = StringVar()
current_corpus.set('Corpus')
available_corpora = OptionMenu(interro_opt, current_corpus, *tuple(('Select corpus')))
available_corpora.config(width=30, state=DISABLED, justify=CENTER)
# changing the selection triggers corpus_callback
current_corpus.trace("w", corpus_callback)
available_corpora.grid(row=0, column=0, columnspan=2, padx=(135,0))
available_corpora_build = OptionMenu(tab0, current_corpus, *tuple(('Select corpus')))
available_corpora_build.config(width=25, justify=CENTER, state=DISABLED)
available_corpora_build.grid(row=4, column=0, sticky=W)
# exclusion criteria state shared with the add_criteria popup
ex_additional_criteria = {}
ex_anyall = StringVar()
ex_anyall.set('any')
ex_objs = OrderedDict()
# fill it with null data
for i in range(20):
    tmp = StringVar()
    tmp.set('')
    ex_objs[i] = [None, None, None, tmp]
ex_permref = []
exclude_str = StringVar()
exclude_str.set('')
# the exclude row: operation menu, query entry, and '+' for extra criteria
Label(interro_opt, text='Exclude:').grid(row=8, column=0, sticky=W, pady=(0, 10))
exclude_op = StringVar()
exclude_op.set('None')
exclude = OptionMenu(interro_opt, exclude_op, *['None'] + sorted(convert_name_to_query.keys()))
exclude.config(width=14)
exclude.grid(row=8, column=0, sticky=W, padx=(60, 0), pady=(0, 10))
qr = Entry(interro_opt, textvariable=exclude_str, width=18, state=DISABLED)
qr.grid(row=8, column=0, columnspan=2, sticky=E, padx=(0,40), pady=(0, 10))
all_text_widgets.append(qr)
ex_plusbut = Button(interro_opt, text='+', \
    command=lambda: add_criteria(ex_objs, ex_permref, ex_anyall, ex_additional_criteria, \
    exclude_op, exclude_str, title = 'Exclude from interrogation'), \
    state=DISABLED)
ex_plusbut.grid(row=8, column=1, sticky=E, pady=(0, 10))
#blklst = StringVar()
#Label(interro_opt, text='Blacklist:').grid(row=12, column=0, sticky=W)
##blklst.set(r'^n')
#blklst.set(r'')
#bkbx = Entry(interro_opt, textvariable=blklst, width=22)
#bkbx.grid(row=12, column=0, columnspan=2, sticky=E)
#all_text_widgets.append(bkbx)
def populate_metavals(evt):
    """
    Add the values for a metadata field to the subcorpus box.

    *evt* may be either a Tk ``<<ListboxSelect>>`` event or the Listbox
    widget itself: refresh_by_metadata calls this directly with the
    listbox, so both shapes are accepted.
    """
    from corpkit.process import get_corpus_metadata
    try:
        wx = evt.widget
    except AttributeError:
        # called directly with the listbox, not via an event
        wx = evt
    speaker_listbox.configure(state=NORMAL)
    speaker_listbox.delete(0, END)
    indices = wx.curselection()
    # nothing selected: leave the values box empty rather than raising
    if not indices:
        return
    if wx.get(indices[0]) != 'none':
        speaker_listbox.insert(END, 'ALL')
    for index in indices:
        value = wx.get(index)
        if value == 'files':
            from corpkit.corpus import Corpus
            corp = Corpus(current_corpus.get(), print_info=False)
            vals = [i.name for i in corp.all_files]
        elif value == 'folders':
            from corpkit.corpus import Corpus
            corp = Corpus(current_corpus.get(), print_info=False)
            vals = [i.name for i in corp.subcorpora]
        elif value == 'none':
            vals = []
        else:
            # a real metadata field: look its values up in the corpus dotfile
            meta = get_corpus_metadata(corpus_fullpath.get(), generate=True)
            vals = meta['fields'][value]
        #vals = get_speaker_names_from_parsed_corpus(corpus_fullpath.get(), value)
        for v in vals:
            speaker_listbox.insert(END, v)
# lemma tags
#lemtags = tuple(('Off', 'Noun', 'Verb', 'Adjective', 'Adverb'))
#lemtag = StringVar(root)
#lemtag.set('')
#Label(interro_opt, text='Result word class:').grid(row=13, column=0, columnspan=2, sticky=E, padx=(0, 120))
#lmt = OptionMenu(interro_opt, lemtag, *lemtags)
#lmt.config(state=NORMAL, width=10)
#lmt.grid(row=13, column=1, sticky=E)
#lemtag.trace("w", d_callback)
def refresh_by_metadata(*args):
    """
    Add metadata for a corpus from dotfile to listbox
    """
    import os
    if os.path.isdir(corpus_fullpath.get()):
        from corpkit.process import get_corpus_metadata
        # metadata field names stored in the corpus's dotfile
        ns = get_corpus_metadata(corpus_fullpath.get(), generate=True)
        ns = list(ns.get('fields', {}))
        #ns = corpus_names_and_speakers[os.path.basename(corpus_fullpath.get())]
    else:
        # no corpus directory yet: nothing to refresh
        return
    speaker_listbox.delete(0, 'end')
    # figure out which list we need to add to, and which we should del from
    lbs = []
    delfrom = []
    # todo: this should be, if new corpus, delfrom...
    # NOTE(review): placeholder condition — the else/delfrom branch is
    # currently unreachable
    if True:
        lbs.append(by_met_listbox)
    else:
        delfrom.append(by_met_listbox)
    # add names
    for lb in lbs:
        lb.configure(state=NORMAL)
        lb.delete(0, END)
        from corpkit.corpus import Corpus
        corp = Corpus(current_corpus.get(), print_info=False)
        # level 'c' means a whole corpus: offer folder/file granularity too
        if corp.level == 'c':
            lb.insert(END, 'folders')
            lb.insert(END, 'files')
        for idz in sorted(ns):
            lb.insert(END, idz)
        lb.insert(END, 'none')
    # or delete names
    for lb in delfrom:
        lb.configure(state=NORMAL)
        lb.delete(0, END)
        lb.configure(state=DISABLED)
    # select the first field and show its values in the speaker listbox
    by_met_listbox.selection_set(0)
    populate_metavals(by_met_listbox)
# by metadata
# frame + scrollbar + listbox showing the available metadata fields
by_meta_scrl = Frame(interro_opt)
by_meta_scrl.grid(row=1, column=0, rowspan=2, sticky='w', padx=(5,0), pady=(5, 5))
# scrollbar for the listbox
by_met_bar = Scrollbar(by_meta_scrl)
by_met_bar.pack(side=RIGHT, fill=Y)
# listbox itself
slist_height = 2 if small_screen else 6
by_met_listbox = Listbox(by_meta_scrl, selectmode=EXTENDED, width=12, height=slist_height,
                         relief=SUNKEN, bg='#F4F4F4',
                         yscrollcommand=by_met_bar.set, exportselection=False)
by_met_listbox.pack()
by_met_bar.config(command=by_met_listbox.yview)
# selecting a field fills the values listbox on the right
xx = by_met_listbox.bind('<<ListboxSelect>>', populate_metavals)
# frame to hold metadata values listbox
spk_scrl = Frame(interro_opt)
spk_scrl.grid(row=1, column=0, rowspan=2, columnspan=2, sticky=E, pady=(5,5))
# scrollbar for the listbox
spk_sbar = Scrollbar(spk_scrl)
spk_sbar.pack(side=RIGHT, fill=Y)
# listbox itself
speaker_listbox = Listbox(spk_scrl, selectmode=EXTENDED, width=29, height=slist_height,
                          relief=SUNKEN, bg='#F4F4F4',
                          yscrollcommand=spk_sbar.set, exportselection=False)
speaker_listbox.pack()
speaker_listbox.configure(state=DISABLED)
spk_sbar.config(command=speaker_listbox.yview)
# dep type
#dep_types = tuple(('Basic', 'Collapsed', 'CC-processed'))
#kind_of_dep = StringVar(root)
#kind_of_dep.set('CC-processed')
#Label(interro_opt, text='Dependency type:').grid(row=16, column=0, sticky=W)
#pick_dep_type = OptionMenu(interro_opt, kind_of_dep, *dep_types)
#pick_dep_type.config(state=DISABLED)
#pick_dep_type.grid(row=16, column=0, sticky=W, padx=(125,0))
#kind_of_dep.trace("w", d_callback)
# 'count coreferents' toggle (enabled only for dependency searches)
coref = IntVar(root)
coref.set(False)
coref_but = Checkbutton(interro_opt, text='Count coreferents', variable=coref, onvalue=True, offvalue=False)
coref_but.grid(row=6, column=1, sticky=E, pady=(5,0))
coref_but.config(state=DISABLED)
# query
# entrytext mirrors the multi-line query box below (see entry_callback)
entrytext=StringVar()
Label(interro_opt, text='Query:').grid(row=4, column=0, sticky='NW', pady=(5,0))
entrytext.set(r'\b(m.n|wom.n|child(ren)?)\b')
qa_height = 2 if small_screen else 6
qa = Text(interro_opt, width=40, height=qa_height, borderwidth=0.5,
          font=("Courier New", 14), undo=True, relief=SUNKEN, wrap=WORD, highlightthickness=0)
qa.insert(END, entrytext.get())
qa.grid(row=4, column=0, columnspan=2, sticky=E, pady=(5,5), padx=(0, 4))
all_text_widgets.append(qa)
# --- additional search criteria state (mirrors the exclude state above) -----
additional_criteria = {}
anyall = StringVar()
anyall.set('all')
objs = OrderedDict()
# fill it with null data
for i in range(20):
    tmp = StringVar()
    tmp.set('')
    objs[i] = [None, None, None, tmp]
permref = []
def add_criteria(objs, permref, anyalltoggle, output_dict,
                 optvar, enttext, title = "Additional criteria"):
    """this is a popup for adding additional search criteria.
    it's also used for excludes

    objs: OrderedDict of row widgets/vars for the popup
    permref: one-element list caching the Toplevel so reopening just
        deiconifies it
    anyalltoggle: StringVar, 'any'/'all' matching mode
    output_dict: dict the collected criteria are written into
    optvar/enttext: the main pane's search-type var and query var
    """
    if title == 'Additional criteria':
        # seed the popup with whatever is currently in the query box
        enttext.set(qa.get(1.0, END).strip('\n').strip())
    from tkinter import Toplevel
    try:
        # popup already exists: just bring it back
        more_criteria = permref[0]
        more_criteria.deiconify()
        return
    except:
        pass
    more_criteria = Toplevel()
    more_criteria.geometry('+500+100')
    more_criteria.title(title)
    more_criteria.wm_attributes('-topmost', 1)
    total = 0
    n_items = []
    def quit_q(total, *args):
        """exit popup, saving entries"""
        poss_keys = []  # NOTE(review): assigned but never used
        # NOTE(review): the loop var 'optvar' shadows the enclosing parameter
        for index, (option, optvar, entbox, entstring) in enumerate(list(objs.values())[:total]):
            if index == 0:
                # first row feeds the main pane's query/search-type widgets
                enttext.set(entstring.get())
                optvar.set(optvar.get())
                datatype_picked.set(optvar.get())
            if optvar is not None:
                # translate the friendly name to a query key and expand
                # any custom special queries in the entry text
                o = convert_name_to_query.get(optvar.get(), optvar.get())
                q = entstring.get().strip()
                q = remake_special(q, customs=custom_special_dict,
                                   case_sensitive=case_sensitive.get(), return_list=True)
                output_dict[o] = q
        # may not work on mac ...
        # tint the '+' button to show whether criteria are active
        if title == 'Additional criteria':
            if len(list(objs.values())[:total]) > 0:
                plusbut.config(bg='#F4F4F4')
            else:
                plusbut.config(bg='white')
        else:
            if len(list(objs.values())[:total]) > 0:
                ex_plusbut.config(bg='#F4F4F4')
            else:
                ex_plusbut.config(bg='white')
        # hide rather than destroy, so state survives a reopen
        more_criteria.withdraw()
    def remove_prev():
        """delete last added criteria line"""
        # never remove the first row
        if len([k for k, v in objs.items() if v[0] is not None]) < 2:
            pass
        else:
            # find the highest-numbered populated row
            ans = 0
            for k, (a, b, c, d) in reversed(list(objs.items())):
                if a is not None:
                    ans = k
                    break
            if objs[ans][0] is not None:
                objs[ans][0].destroy()
            optvar = objs[ans][1].get()
            try:
                # drop any criterion already saved for that row
                del output_dict[convert_name_to_query[optvar]]
            except:
                pass
            objs[ans][1] = StringVar()
            if objs[ans][2] is not None:
                objs[ans][2].destroy()
            objs[ans][3] = StringVar()
            objs.pop(ans, None)
    def clear_q():
        """clear the popup"""
        for optmenu, optvar, entbox, entstring in list(objs.values()):
            if optmenu is not None:
                optvar.set('Word')
                entstring.set('')
    def new_item(total, optvar, enttext, init = False):
        """add line to popup"""
        # destroy the footer buttons so they can be re-gridded below the
        # new row
        for i in n_items:
            i.destroy()
        for i in n_items:
            n_items.remove(i)
        chosen = StringVar()
        poss = ['None'] + sorted(convert_name_to_query.keys())
        poss = [k for k in poss if not 'distance' in k.lower() and not 'head ' in k.lower()]
        chosen.set('Word')
        opt = OptionMenu(more_criteria, chosen, *poss)
        opt.config(width=16)
        t = total + 1
        opt.grid(row=total, column=0, sticky=W)
        text_str = StringVar()
        text_str.set('')
        text=Entry(more_criteria, textvariable=text_str, width=40, font=("Courier New", 13))
        all_text_widgets.append(text)
        text.grid(row=total, column=1)
        objs[total] = [opt, chosen, text, text_str]
        # footer: -, +, Done, Clear, any/all radio buttons
        minuser = Button(more_criteria, text='-', command=remove_prev)
        minuser.grid(row=total + 2, column=0, sticky=W, padx=(38,0))
        plusser = Button(more_criteria, text='+', command=lambda : new_item(t, optvar, enttext))
        plusser.grid(row=total + 2, column=0, sticky=W)
        stopbut = Button(more_criteria, text='Done', command=lambda : quit_q(t))
        stopbut.grid(row=total + 2, column=1, sticky=E)
        clearbut = Button(more_criteria, text='Clear', command=clear_q)
        clearbut.grid(row=total + 2, column=1, sticky=E, padx=(0, 60))
        r1 = Radiobutton(more_criteria, text='Match any', variable=anyalltoggle, value= 'any')
        r1.grid(row=total + 2, column=0, columnspan=2, sticky=E, padx=(0,150))
        r2 = Radiobutton(more_criteria, text='Match all', variable=anyalltoggle, value= 'all')
        r2.grid(row=total + 2, column=0, columnspan=2, sticky=E, padx=(0,250))
        n_items.append(plusser)
        n_items.append(stopbut)
        n_items.append(minuser)
        n_items.append(clearbut)
        n_items.append(r1)
        n_items.append(r2)
        if init:
            # first row mirrors the main pane's current query
            text_str.set(enttext.get())
            chosen.set(optvar.get())
            minuser.config(state=DISABLED)
        else:
            minuser.config(state=NORMAL)
        return t
    if objs:
        # re-grid any rows left over from a previous session
        # NOTE(review): objs is pre-filled with [None, None, None, var]
        # placeholders, for which optmenu.grid() would raise — presumably
        # a None-guard was intended here; verify against upstream corpkit
        for optmenu, optvar, entbox, entstring in list(objs.values()):
            optmenu.grid()
            entbox.grid()
    # make the first button with defaults
    total = new_item(total, optvar, enttext, init = True)
    if more_criteria not in permref:
        permref.append(more_criteria)
# '+' opens the popup for stacking additional search criteria
plusbut = Button(interro_opt, text='+', \
    command=lambda: add_criteria(objs, permref, anyall, \
                                 additional_criteria, datatype_picked, entrytext), \
    state=NORMAL)
plusbut.grid(row=4, column=0, columnspan=1, padx=(25,0), pady=(10,0), sticky='w')
def entry_callback(*args):
    """when entry is changed, add it to the textbox"""
    qa.config(state=NORMAL)
    qa.delete(1.0, END)
    qa.insert(END, entrytext.get())
entrytext.trace("w", entry_callback)
def onselect(evt):
    """When a search option is selected, scroll it into view and, if the
    query box still holds an untouched example query, swap in the example
    for the newly chosen search type."""
    widget = evt.widget
    idx = int(widget.curselection()[0])
    value = widget.get(idx)
    widget.see(idx)
    current_query = qa.get(1.0, END).strip('\n').strip()
    # only replace the query if it is one of our examples (i.e. the user
    # has not typed their own) and is not a saved query
    if current_query in list(def_queries.values()):
        if current_query not in list(qd.values()):
            entrytext.set(def_queries[datatype_picked.get()])
#try:
# ngmsize.destroy()
#except:
# pass
#try:
# split_contract.destroy()
#except:
# pass
# boolean interrogation arguments need fixing, right now use 0 and 1
#lem = IntVar()
#lbut = Checkbutton(interro_opt, text="Lemmatise", variable=lem, onvalue=True, offvalue=False)
#lbut.grid(column=0, row=8, sticky=W)
#phras = IntVar()
#mwbut = Checkbutton(interro_opt, text="Multiword results", variable=phras, onvalue=True, offvalue=False)
#mwbut.grid(column=1, row=8, sticky=E)
#tit_fil = IntVar()
#tfbut = Checkbutton(interro_opt, text="Filter titles", variable=tit_fil, onvalue=True, offvalue=False)
#tfbut.grid(row=9, column=0, sticky=W)
# case-sensitivity toggle for queries
case_sensitive = IntVar()
tmp = Checkbutton(interro_opt, text="Case sensitive", variable=case_sensitive, onvalue=True, offvalue=False)
tmp.grid(row=6, column=0, sticky=W, padx=(140,0), pady=(5,0))
# punctuation toggle — NOTE the inverted on/off values: ticking the box
# sets no_punct to False (i.e. keep punctuation)
no_punct = IntVar()
tmp = Checkbutton(interro_opt, text="Punctuation", variable=no_punct, onvalue=False, offvalue=True)
tmp.deselect()
tmp.grid(row=6, column=0, sticky=W, pady=(5,0))
# n-gram size dropdown (global: read by the interrogation runner)
global ngmsize
Label(interro_opt, text='N-gram size:').grid(row=5, column=0, sticky=W, padx=(220,0), columnspan=2, pady=(5,0))
ngmsize = MyOptionMenu(interro_opt, 'Size','1', '2','3','4','5','6','7','8')
ngmsize.configure(width=12)
ngmsize.grid(row=5, column=1, sticky=E, pady=(5,0))
#ngmsize.config(state=DISABLED)
# collocation window dropdown (global: read by the interrogation runner)
global collosize
Label(interro_opt, text='Collocation window:').grid(row=5, column=0, sticky=W, pady=(5,0))
collosize = MyOptionMenu(interro_opt, 'Size','1', '2','3','4','5','6','7','8')
collosize.configure(width=8)
collosize.grid(row=5, column=0, sticky=W, padx=(140,0), pady=(5,0))
#collosize.config(state=DISABLED)
#global split_contract
#split_contract = IntVar(root)
#split_contract.set(False)
#split_contract_but = Checkbutton(interro_opt, text='Split contractions', variable=split_contract, onvalue=True, offvalue=False)
#split_contract_but.grid(row=7, column=1, sticky=E)
#Label(interro_opt, text='Spelling:').grid(row=6, column=1, sticky=E, padx=(0, 75))
#spl = MyOptionMenu(interro_opt, 'Off','UK','US')
#spl.configure(width=7)
#spl.grid(row=6, column=1, sticky=E, padx=(2, 0))
def desel_and_turn_off(but):
    """Untick a checkbutton and grey it out.

    The button is enabled before deselecting and disabled afterwards,
    preserving the original call order.
    """
    # (removed a stray no-op `pass` that preceded these calls)
    but.config(state=NORMAL)
    but.deselect()
    but.config(state=DISABLED)
def turnon(but):
    """Re-enable a previously disabled widget."""
    but.configure(state=NORMAL)
def callback(*args):
    """if the drop down list for data type changes, fill options"""
    #datatype_listbox.delete(0, 'end')
    chosen = datatype_picked.get()
    #lst = option_dict[chosen]
    #for e in lst:
    #    datatype_listbox.insert(END, e)
    # every search type except tree search
    notree = [i for i in sorted(convert_name_to_query.keys()) if i != 'Trees']
    if chosen == 'Trees':
        # tree search: only token/lemma/tree/count returns make sense
        for but in [ck5, ck6, ck7, ck9, ck10, ck11, ck12, ck13, ck14, ck15, ck16, \
                    ck17, ck18, ck19, ck20]:
            desel_and_turn_off(but)
        # NOTE(review): ck4 appears twice in this list
        for but in [ck1, ck2, ck4, ck4, ck8]:
            turnon(but)
        ck1.select()
        #q.config(state=DISABLED)
        #qr.config(state=DISABLED)
        #exclude.config(state=DISABLED)
        #sec_match.config(state=DISABLED)
        # extra criteria popups are not available for tree queries
        plusbut.config(state=DISABLED)
        ex_plusbut.config(state=DISABLED)
    elif chosen in notree:
        # dependency-style searches need a parsed corpus
        if current_corpus.get().endswith('-parsed'):
            for but in [ck1, ck2, ck3, ck5, ck6, ck7, ck8, ck9, ck10, \
                        ck11, ck12, ck13, ck14, ck15, ck16, \
                        ck17, ck18, ck19, ck20, \
                        plusbut, ex_plusbut, exclude, qr]:
                turnon(but)
            desel_and_turn_off(ck4)
    # 'Stats' interrogations get a fixed name and limited returns
    if chosen == 'Stats':
        nametext.set('features')
        nametexter.config(state=DISABLED)
    else:
        nametexter.config(state=NORMAL)
        nametext.set('untitled')
    if chosen == 'Stats':
        for but in [ck2, ck3, ck4, ck5, ck6, ck7, ck8, ck9, ck10, \
                    ck11, ck12, ck13, ck14, ck15, ck16]:
            desel_and_turn_off(but)
        turnon(ck1)
        ck1.select()
    # NOTE(review): ngmshows is assigned but not used in this function
    # (countmode builds the same list and uses it)
    ngmshows = [return_ngm, return_ngm_lemma, return_ngm_func, return_ngm_pos]
    #ngmsize.config(state=NORMAL)
    #collosize.config(state=NORMAL)
    #if qa.get(1.0, END).strip('\n').strip() in def_queries.values() + special_examples.values():
    # if the query box holds an untouched example, replace it with the
    # example for the newly chosen search type
    clean_query = qa.get(1.0, END).strip('\n').strip()
    acc_for_tups = [i[0] if isinstance(i, tuple) else i for i in list(def_queries.values())]
    if (clean_query not in list(qd.values()) and clean_query in acc_for_tups) \
        or not clean_query:
        try:
            # for the life of me i don't know why some are appearing as tuples
            found = def_queries.get(chosen, clean_query)
            if isinstance(found, tuple):
                found = found[0]
            entrytext.set(found)
        except:
            pass
datatype_picked = StringVar(root)
Label(interro_opt, text='Search: ').grid(row=3, column=0, sticky=W, pady=10)
pick_a_datatype = OptionMenu(interro_opt, datatype_picked, *sorted(convert_name_to_query.keys()))
pick_a_datatype.configure(width=30, justify=CENTER)
datatype_picked.set('Word')
pick_a_datatype.grid(row=3, column=0, columnspan=2, sticky=W, padx=(136,0))
datatype_picked.trace("w", callback)
# trees, words, functions, governors, dependents, pos, lemma, count
interro_return_frm = Frame(interro_opt)
Label(interro_return_frm, text=' Return', font=("Courier New", 13, "bold")).grid(row=0, column=0, sticky=E)
interro_return_frm.grid(row=7, column=0, columnspan=2, sticky=W, pady=10, padx=(10,0))
Label(interro_return_frm, text=' Token', font=("Courier New", 13)).grid(row=0, column=1, sticky=E)
Label(interro_return_frm, text=' Lemma', font=("Courier New", 13)).grid(row=0, column=2, sticky=E)
Label(interro_return_frm, text=' POS tag', font=("Courier New", 13)).grid(row=0, column=3, sticky=E)
Label(interro_return_frm, text= 'Function', font=("Courier New", 13)).grid(row=0, column=4, sticky=E)
Label(interro_return_frm, text=' Match', font=("Courier New", 13)).grid(row=1, column=0, sticky=E)
Label(interro_return_frm, text=' Governor', font=("Courier New", 13)).grid(row=2, column=0, sticky=E)
Label(interro_return_frm, text='Dependent', font=("Courier New", 13)).grid(row=3, column=0, sticky=E)
prenext_pos = StringVar(root)
prenext_pos.set('Position')
pick_posi_o = ('-5', '-4', '-3', '-2', '-1', '+1', '+2', '+3', '+4', '+5')
pick_posi_m = OptionMenu(interro_return_frm, prenext_pos, *pick_posi_o)
pick_posi_m.config(width=8)
pick_posi_m.grid(row=4, column=0, sticky=E)
#Label(interro_return_frm, text= 'N-gram', font=("Courier New", 13)).grid(row=4, column=0, sticky=E)
Label(interro_return_frm, text=' Other', font=("Courier New", 13)).grid(row=5, column=0, sticky=E)
Label(interro_return_frm, text=' Count', font=("Courier New", 13)).grid(row=5, column=1, sticky=E)
Label(interro_return_frm, text=' Index', font=("Courier New", 13)).grid(row=5, column=2, sticky=E)
Label(interro_return_frm, text=' Distance', font=("Courier New", 13)).grid(row=5, column=3, sticky=E)
Label(interro_return_frm, text=' Tree', font=("Courier New", 13)).grid(row=5, column=4, sticky=E)
return_token = StringVar()
return_token.set('')
ck1 = Checkbutton(interro_return_frm, variable=return_token, onvalue='w', offvalue = '')
ck1.select()
ck1.grid(row=1, column=1, sticky=E)
def return_token_callback(*args):
    """In Trees mode, ticking 'token' re-enables and unticks the options
    that are mutually exclusive with it (POS, tree, count)."""
    if datatype_picked.get() == 'Trees' and return_token.get():
        for other in (ck3, ck4, ck8):
            other.config(state=NORMAL)
            other.deselect()
return_token.trace("w", return_token_callback)
# ck2: return the lemma of the match ('l')
return_lemma = StringVar()
return_lemma.set('')
ck2 = Checkbutton(interro_return_frm, anchor=E, variable=return_lemma, onvalue='l', offvalue = '')
ck2.grid(row=1, column=2, sticky=E)
def return_lemma_callback(*args):
    """In Trees mode, lemma output excludes POS/tree/count and toggles
    the lemma-tag menu."""
    if datatype_picked.get() == 'Trees':
        if return_lemma.get():
            for but in [ck3, ck4, ck8]:
                but.config(state=NORMAL)
                but.deselect()
            # NOTE(review): lmt is only defined in code that is commented
            # out above — if this branch fires, it raises NameError;
            # verify against upstream corpkit
            lmt.configure(state=NORMAL)
        else:
            lmt.configure(state=DISABLED)
return_lemma.trace("w", return_lemma_callback)
# ck3: return the POS tag of the match ('p')
return_pos = StringVar()
return_pos.set('')
ck3 = Checkbutton(interro_return_frm, variable=return_pos, onvalue='p', offvalue = '')
ck3.grid(row=1, column=3, sticky=E)
def return_pos_callback(*args):
    """In Trees mode, POS output excludes token/lemma/tree/count."""
    if datatype_picked.get() == 'Trees':
        if return_pos.get():
            for but in [ck1, ck2, ck4, ck8]:
                but.config(state=NORMAL)
                but.deselect()
return_pos.trace("w", return_pos_callback)
# ck7: return the grammatical function of the match ('f')
return_function = StringVar()
return_function.set('')
ck7 = Checkbutton(interro_return_frm, variable=return_function, onvalue='f', offvalue = '')
ck7.grid(row=1, column=4, sticky=E)
# ck4: return the matching tree ('t'); in Trees mode it is mutually
# exclusive with token/lemma/POS/count
return_tree = StringVar()
return_tree.set('')
ck4 = Checkbutton(interro_return_frm, anchor=E, variable=return_tree, onvalue='t', offvalue = '')
ck4.grid(row=6, column=4, sticky=E)
def return_tree_callback(*args):
    """In Trees mode, tree output excludes token/lemma/POS/count."""
    if datatype_picked.get() == 'Trees':
        if return_tree.get():
            for but in [ck1, ck2, ck3, ck8]:
                but.config(state=NORMAL)
                but.deselect()
# register the trace once — the original registered it twice, making the
# callback fire twice on every write to return_tree
return_tree.trace("w", return_tree_callback)
# ck5: return the token's index in its sentence ('i')
return_index = StringVar()
return_index.set('')
ck5 = Checkbutton(interro_return_frm, anchor=E, variable=return_index, onvalue='i', offvalue = '')
ck5.grid(row=6, column=2, sticky=E)
# ck6: return distance from root ('a')
return_distance = StringVar()
return_distance.set('')
ck6 = Checkbutton(interro_return_frm, anchor=E, variable=return_distance, onvalue='a', offvalue = '')
ck6.grid(row=6, column=3, sticky=E)
# ck8: return counts only ('c') — see countmode() below
return_count = StringVar()
return_count.set('')
ck8 = Checkbutton(interro_return_frm, variable=return_count, onvalue='c', offvalue = '')
ck8.grid(row=6, column=1, sticky=E)
def countmode(*args):
    """When 'count only' is toggled, disable (or restore) the other
    return options, since a pure count excludes them."""
    ngmshows = [return_ngm, return_ngm_lemma, return_ngm_func, return_ngm_pos]
    # NOTE(review): ngmbuts is assigned but never used
    ngmbuts = [ck17, ck18, ck19, ck20]
    # n-gram returns take precedence: leave everything alone
    if any(ngmshow.get() for ngmshow in ngmshows):
        return
    if datatype_picked.get() != 'Trees':
        buttons = [ck1, ck2, ck3, ck4, ck5, ck6, ck7, ck9,
                   ck10, ck11, ck12, ck13, ck14, ck15, ck16,
                   ck17, ck18, ck19, ck20]
        if return_count.get() == 'c':
            # counting: grey out every other return option
            for b in buttons:
                desel_and_turn_off(b)
            ck8.config(state=NORMAL)
        else:
            # count unticked: restore buttons, then let callback() apply
            # the per-search-type enabling rules
            for b in buttons:
                b.config(state=NORMAL)
            callback()
    else:
        if return_count.get():
            for but in [ck1, ck2, ck3, ck4]:
                but.config(state=NORMAL)
                but.deselect()
return_count.trace("w", countmode)
# --- governor returns: token/lemma/POS/function of the match's governor ----
return_gov = StringVar()
return_gov.set('')
ck9 = Checkbutton(interro_return_frm, variable=return_gov,
                  onvalue='gw', offvalue = '')
ck9.grid(row=2, column=1, sticky=E)
return_gov_lemma = StringVar()
return_gov_lemma.set('')
ck10 = Checkbutton(interro_return_frm, variable=return_gov_lemma,
                   onvalue='gl', offvalue = '')
ck10.grid(row=2, column=2, sticky=E)
return_gov_pos = StringVar()
return_gov_pos.set('')
ck11 = Checkbutton(interro_return_frm, variable=return_gov_pos,
                   onvalue='gp', offvalue = '')
ck11.grid(row=2, column=3, sticky=E)
return_gov_func = StringVar()
return_gov_func.set('')
ck12 = Checkbutton(interro_return_frm, variable=return_gov_func,
                   onvalue='gf', offvalue = '')
ck12.grid(row=2, column=4, sticky=E)
# --- dependent returns: token/lemma/POS/function of the match's dependent --
return_dep = StringVar()
return_dep.set('')
ck13 = Checkbutton(interro_return_frm, variable=return_dep,
                   onvalue='dw', offvalue = '')
ck13.grid(row=3, column=1, sticky=E)
return_dep_lemma = StringVar()
return_dep_lemma.set('')
ck14 = Checkbutton(interro_return_frm, variable=return_dep_lemma,
                   onvalue='dl', offvalue = '')
ck14.grid(row=3, column=2, sticky=E)
return_dep_pos = StringVar()
return_dep_pos.set('')
ck15 = Checkbutton(interro_return_frm, variable=return_dep_pos,
                   onvalue='dp', offvalue = '')
ck15.grid(row=3, column=3, sticky=E)
return_dep_func = StringVar()
return_dep_func.set('')
ck16 = Checkbutton(interro_return_frm, variable=return_dep_func,
                   onvalue='df', offvalue = '')
ck16.grid(row=3, column=4, sticky=E)
# --- n-gram returns (position row): token/lemma/POS/function ----------------
return_ngm = StringVar()
return_ngm.set('')
ck17 = Checkbutton(interro_return_frm, variable=return_ngm,
                   onvalue='w', offvalue = '')
ck17.grid(row=4, column=1, sticky=E)
return_ngm_lemma = StringVar()
return_ngm_lemma.set('')
ck18 = Checkbutton(interro_return_frm, variable=return_ngm_lemma,
                   onvalue='l', offvalue = '')
ck18.grid(row=4, column=2, sticky=E)
return_ngm_pos = StringVar()
return_ngm_pos.set('')
ck19 = Checkbutton(interro_return_frm, variable=return_ngm_pos,
                   onvalue='p', offvalue = '')
ck19.grid(row=4, column=3, sticky=E)
# n-gram function return starts disabled
return_ngm_func = StringVar()
return_ngm_func.set('')
ck20 = Checkbutton(interro_return_frm, variable=return_ngm_func,
                   onvalue='f', offvalue = '', state=DISABLED)
ck20.grid(row=4, column=4, sticky=E)
def q_callback(*args):
    """Re-enable the query text box and the exclude entry."""
    for widget in (qa, qr):
        widget.configure(state=NORMAL)
#queries = tuple(('Off', 'Any', 'Participants', 'Processes', 'Subjects', 'Stats'))
#special_queries = StringVar(root)
#special_queries.set('Off')
#Label(interro_opt, text='Preset:').grid(row=7, column=0, sticky=W)
#pick_a_query = OptionMenu(interro_opt, special_queries, *queries)
#pick_a_query.config(width=11, state=DISABLED)
#pick_a_query.grid(row=7, column=0, padx=(60, 0), columnspan=2, sticky=W)
#special_queries.trace("w", q_callback)
# Interrogation name
# name under which the interrogation is saved; callback() overrides it to
# 'features' for Stats searches
nametext=StringVar()
nametext.set('untitled')
Label(interro_opt, text='Interrogation name:').grid(row=17, column=0, sticky=W)
nametexter = Entry(interro_opt, textvariable=nametext, width=15)
nametexter.grid(row=17, column=1, sticky=E)
all_text_widgets.append(nametexter)
def show_help(kind):
    """Open the online documentation page for *kind* in the default
    browser: 'h' = general help, 'q' = query syntax, 't' = troubleshooting.

    Raises KeyError for an unknown kind.
    """
    import webbrowser
    pages = {'h': 'http://interrogator.github.io/corpkit/doc_help.html',
             'q': 'http://interrogator.github.io/corpkit/doc_interrogate.html#trees',
             't': 'http://interrogator.github.io/corpkit/doc_troubleshooting.html'}
    webbrowser.open_new(pages[kind])
# query help, interrogate button
#Button(interro_opt, text='Query help', command=query_help).grid(row=14, column=0, sticky=W)
# main 'Interrogate' button; disabled until a corpus is selected
interrobut = Button(interro_opt, text='Interrogate')
interrobut.config(command=lambda: runner(interrobut, do_interrogation, conc=True), state=DISABLED)
interrobut.grid(row=18, column=1, sticky=E)
# name to show above spreadsheet 0
i_resultname = StringVar()
def change_interro_spread(*args):
    """Enable the 'update interrogation' button only while a results
    spreadsheet is loaded."""
    if name_of_interro_spreadsheet.get():
        new_state = NORMAL
    else:
        new_state = DISABLED
    updbut.config(state=new_state)
# which interrogation's results are currently shown; traced above
name_of_interro_spreadsheet = StringVar()
name_of_interro_spreadsheet.set('')
name_of_interro_spreadsheet.trace("w", change_interro_spread)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
# make spreadsheet frames for interrogate pane
wdth = int(note_width * 0.50)
interro_right = Frame(tab1, width=wdth)
interro_right.grid(row=0, column=1, sticky=N)
interro_results = Frame(interro_right, height=40, width=wdth, borderwidth=2)
interro_results.grid(column=0, row=0, padx=20, pady=(20,0), sticky='N', columnspan=4)
interro_totals = Frame(interro_right, height=1, width=20, borderwidth=2)
interro_totals.grid(column=0, row=1, padx=20, columnspan=4)
llab = Label(interro_right, textvariable=i_resultname,
             font=("Helvetica", 13, "bold"))
llab.grid(row=0, column=0, sticky='NW', padx=20, pady=0)
llab.lift()
# show nothing yet
update_spreadsheet(interro_results, df_to_show=None, height=450, width=wdth)
update_spreadsheet(interro_totals, df_to_show=None, height=10, width=wdth)
#global prev
# previous/next navigate through the history of interrogations
four_interro_under = Frame(interro_right, width=wdth)
four_interro_under.grid(row=3, column=0, sticky='ew', padx=(20,0))
prev = Button(four_interro_under, text='Previous', command=show_prev)
prev.pack(side='left', expand=True)
#global nex
nex = Button(four_interro_under, text='Next', command=show_next)
nex.pack(side='left', expand=True, padx=(0,50))
# nothing to navigate until at least two interrogations exist
if len(list(all_interrogations.keys())) < 2:
    nex.configure(state=DISABLED)
    prev.configure(state=DISABLED)
#savdict = Button(four_interro_under, text='Save as dictionary', command=save_as_dictionary)
#savdict.config(state=DISABLED)
#savdict.pack(side='right', expand=True)
updbut = Button(four_interro_under, text='Update interrogation', command=lambda: update_all_interrogations(pane='interrogate'))
updbut.pack(side='right', expand=True)
updbut.config(state=DISABLED)
############## ############## ############## ############## ##############
# EDITOR TAB # # EDITOR TAB # # EDITOR TAB # # EDITOR TAB # # EDITOR TAB #
############## ############## ############## ############## ##############
editor_buttons = Frame(tab2)
editor_buttons.grid(row=0, column=0, sticky='NW')
def do_editing():
    """
    What happens when you press edit: collect every editor option from the
    GUI widgets, build the kwargs for Interrogation.edit(), run the edit,
    then store/display the result.
    """
    edbut.config(state=DISABLED)
    import os
    import pandas as pd
    from corpkit.editor import editor
    # translate operation into interrogator input
    operation_text=opp.get()
    if operation_text == 'None' or operation_text == 'Select an operation':
        operation_text=None
    else:
        # default: first character of the menu label ('%', '-', '+', ...)
        operation_text=opp.get()[0]
    # special-case the labels whose first character is not the operator
    if opp.get() == u"\u00F7":
        operation_text='/'
    if opp.get() == u"\u00D7":
        operation_text='*'
    if opp.get() == '%-diff':
        operation_text='d'
    if opp.get() == 'rel. dist.':
        operation_text='a'
    # translate dataframe2
    data2 = data2_pick.get()
    if data2 == 'None' or data2 == '':
        data2 = False
    elif data2 == 'Self':
        data2 = 'self'
    elif data2 in ['features', 'postags', 'wordclasses']:
        # denominator is a corpus-level table, not another interrogation
        from corpkit.corpus import Corpus
        corp = Corpus(current_corpus.get(), print_info=False)
        data2 = getattr(corp, data2_pick.get())
        #todo: populate results/totals with possibilities for features etc
    elif data2 is not False:
        # denominator is a saved interrogation: take its results or totals
        if df2branch.get() == 'results':
            try:
                data2 = getattr(all_interrogations[data2], df2branch.get())
            except AttributeError:
                timestring('Denominator has no results attribute.')
                return
        elif df2branch.get() == 'totals':
            try:
                data2 = getattr(all_interrogations[data2], df2branch.get())
            except AttributeError:
                timestring('Denominator has no totals attribute.')
                return
    if transpose.get():
        # best-effort transpose: 'self'/False denominators have no .T
        try:
            data2 = data2.T
        except:
            pass
    the_data = all_interrogations[name_of_o_ed_spread.get()]
    if df1branch.get() == 'results':
        if not hasattr(the_data, 'results'):
            timestring('Interrogation has no results attribute.')
            return
    elif df1branch.get() == 'totals':
        # NOTE(review): data1 is assigned but never used; .edit() below
        # receives branch= instead
        data1 = the_data.totals
    if (spl_editor.var).get() == 'Off' or (spl_editor.var).get() == 'Convert spelling':
        spel = False
    else:
        spel = (spl_editor.var).get()
    # editor kwargs
    editor_args = {'operation': operation_text,
                   'dataframe2': data2,
                   'spelling': spel,
                   'sort_by': sort_trans[sort_val.get()],
                   'df1_always_df': True,
                   'root': root,
                   'note': note,
                   'packdir': rd,
                   'p': p_val.get()}
    # subcorpus handling: merge/keep/span/skip the selected subcorpora
    if do_sub.get() == 'Merge':
        editor_args['merge_subcorpora'] = subc_sel_vals
    elif do_sub.get() == 'Keep':
        editor_args['just_subcorpora'] = subc_sel_vals
    elif do_sub.get() == 'Span':
        editor_args['span_subcorpora'] = subc_sel_vals
    elif do_sub.get() == 'Skip':
        editor_args['skip_subcorpora'] = subc_sel_vals
    if toreplace_string.get() != '':
        if replacewith_string.get() == '':
            replacetup = toreplace_string.get()
        else:
            replacetup = (toreplace_string.get(), replacewith_string.get())
        editor_args['replace_names'] = replacetup
    # special query: add to this list!
    #if special_queries.get() != 'Off':
    #query = spec_quer_translate[special_queries.get()]
    entry_do_with = entry_regex.get()
    # allow list queries
    if entry_do_with.startswith('[') and entry_do_with.endswith(']') and ',' in entry_do_with:
        entry_do_with = entry_do_with.lower().lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
    else:
        # convert special stuff
        # NOTE(review): compile result is discarded — this only validates
        # the regex, and an invalid pattern will raise uncaught here
        re.compile(entry_do_with)
        entry_do_with = remake_special(entry_do_with, customs=custom_special_dict,
                                       case_sensitive=case_sensitive.get(),
                                       return_list=True)
        if entry_do_with is False:
            return
    # entry handling: merge/keep/skip the entries matching the regex/list
    if do_with_entries.get() == 'Merge':
        editor_args['merge_entries'] = entry_do_with
        nn = newname_var.get()
        if nn == '':
            editor_args['newname'] = False
        elif is_number(nn):
            editor_args['newname'] = int(nn)
        else:
            editor_args['newname'] = nn
    elif do_with_entries.get() == 'Keep':
        editor_args['just_entries'] = entry_do_with
    elif do_with_entries.get() == 'Skip':
        editor_args['skip_entries'] = entry_do_with
    # NOTE(review): newname_var overwrites new_subc_name when both are set
    if new_subc_name.get() != '':
        editor_args['new_subcorpus_name'] = new_subc_name.get()
    if newname_var.get() != '':
        editor_args['new_subcorpus_name'] = newname_var.get()
    if keep_stats_setting.get() == 1:
        editor_args['keep_stats'] = True
    if rem_abv_p_set.get() == 1:
        editor_args['remove_above_p'] = True
    if just_tot_setting.get() == 1:
        editor_args['just_totals'] = True
    if keeptopnum.get() != 'all':
        try:
            numtokeep = int(keeptopnum.get())
        except ValueError:
            timestring('Keep top n results value must be number.')
            return
        editor_args['keep_top'] = numtokeep
    # do editing
    r = the_data.edit(branch=df1branch.get(), **editor_args)
    if transpose.get():
        try:
            r.results = r.results.T
        except:
            pass
        try:
            r.totals = r.totals.T
        except:
            pass
    if isinstance(r, str):
        if r == 'linregress':
            return
    if not r:
        timestring('Editing caused an error.')
        return
    if len(list(r.results.columns)) == 0:
        timestring('Editing removed all results.')
        return
    # drop over 1000?
    # results should now always be dataframes, so this if is redundant
    if isinstance(r.results, pd.DataFrame):
        large = [n for i, n in enumerate(list(r.results.columns)) if i > 9999]
        r.results.drop(large, axis=1, inplace=True)
    timestring('Result editing completed successfully.')
    # name the edit
    the_name = namer(edit_nametext.get(), type_of_data = 'edited')
    # add edit to master dict
    all_interrogations[the_name] = r
    # update edited results speadsheet name
    name_of_n_ed_spread.set(list(all_interrogations.keys())[-1])
    editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
    # add current subcorpora to editor menu
    for subcl in [subc_listbox]:
        #subcl.configure(state=NORMAL)
        subcl.delete(0, 'end')
        for e in list(r.results.index):
            if e != 'tkintertable-order':
                subcl.insert(END, e)
        #subcl.configure(state=DISABLED)
    # update edited spreadsheets
    most_recent = all_interrogations[list(all_interrogations.keys())[-1]]
    if most_recent.results is not None:
        update_spreadsheet(n_editor_results, most_recent.results, height=140)
    update_spreadsheet(n_editor_totals, pd.DataFrame(most_recent.totals, dtype=object), height=10)
    # finish up
    refresh()
    # reset some buttons that the user probably wants reset
    opp.set('None')
    data2_pick.set('Self')
# restore button
def df2_callback(*args):
    """When the denominator selection changes, enable its branch menu;
    if the chosen interrogation has no results, force 'totals' and lock
    the menu."""
    key = data2_pick.get()
    try:
        chosen = all_interrogations[key]
    except KeyError:
        return
    df2box.config(state=NORMAL)
    if chosen.results is None:
        df2branch.set('totals')
        df2box.config(state=DISABLED)
def df_callback(*args):
    """show names and spreadsheets for what is selected as result to edit
    also, hide the edited results section"""
    if selected_to_edit.get() != 'None':
        edbut.config(state=NORMAL)
        name_of_o_ed_spread.set(selected_to_edit.get())
        thisdata = all_interrogations[selected_to_edit.get()]
        resultname.set('Results to edit: %s' % str(name_of_o_ed_spread.get()))
        if thisdata.results is not None:
            update_spreadsheet(o_editor_results, thisdata.results, height=140)
            df1box.config(state=NORMAL)
        else:
            # totals-only interrogation: lock the branch menu on 'totals'
            df1box.config(state=NORMAL)
            df1branch.set('totals')
            df1box.config(state=DISABLED)
            update_spreadsheet(o_editor_results, df_to_show=None, height=140)
        if thisdata.totals is not None:
            update_spreadsheet(o_editor_totals, thisdata.totals, height=10)
            #df1box.config(state=NORMAL)
        #else:
            #update_spreadsheet(o_editor_totals, df_to_show=None, height=10)
            #df1box.config(state=NORMAL)
            #df1branch.set('results')
            #df1box.config(state=DISABLED)
    else:
        edbut.config(state=DISABLED)
    # blank out the 'edited results' side of the pane
    name_of_n_ed_spread.set('')
    editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
    update_spreadsheet(n_editor_results, df_to_show=None, height=140)
    update_spreadsheet(n_editor_totals, df_to_show=None, height=10)
    # repopulate the subcorpus listbox from the selected data
    for subcl in [subc_listbox]:
        subcl.configure(state=NORMAL)
        subcl.delete(0, 'end')
    # NOTE(review): thisdata is only bound in the first branch above —
    # if 'None' is selected while a previous name lingers here, this
    # raises; verify against upstream corpkit
    if name_of_o_ed_spread.get() != '':
        if thisdata.results is not None:
            cols = list(thisdata.results.index)
        else:
            cols = list(thisdata.totals.index)
        for e in cols:
            if e != 'tkintertable-order':
                subcl.insert(END, e)
    # reset the subcorpus/entry operations
    do_sub.set('Off')
    do_with_entries.set('Off')
# result to edit
# Dropdown listing every stored interrogation as a candidate for editing.
tup = tuple([i for i in list(all_interrogations.keys())])
selected_to_edit = StringVar(root)
selected_to_edit.set('None')
x = Label(editor_buttons, text='To edit', font=("Helvetica", 13, "bold"))
x.grid(row=0, column=0, sticky=W)
dataframe1s = OptionMenu(editor_buttons, selected_to_edit, *tup)
dataframe1s.config(width=25)
dataframe1s.grid(row=1, column=0, columnspan=2, sticky=W)
# Repopulate the editor panes whenever the selection changes.
selected_to_edit.trace("w", df_callback)
# DF1 branch selection
# Which branch ('results'/'totals') of the selection to edit;
# enabled and disabled by df_callback.
df1branch = StringVar()
df1branch.set('results')
df1box = OptionMenu(editor_buttons, df1branch, 'results', 'totals')
df1box.config(width=11, state=DISABLED)
df1box.grid(row=1, column=1, sticky=E)
def op_callback(*args):
    """Toggle denominator widgets to match the chosen editor operation."""
    operation = opp.get()
    if operation == 'None':
        # No operation selected: a denominator makes no sense.
        dataframe2s.config(state=DISABLED)
        df2box.config(state=DISABLED)
    else:
        dataframe2s.config(state=NORMAL)
        df2box.config(state=NORMAL)
        # Keywording and %-diff compare against the results branch.
        if operation in ('keywords', '%-diff'):
            df2branch.set('results')
# operation for editor
opp = StringVar(root)
opp.set('None')
# The mathematical/statistical operation to apply against the denominator.
operations = ('None', '%', u"\u00D7", u"\u00F7", '-', '+', 'combine', 'keywords', '%-diff', 'rel. dist.')
Label(editor_buttons, text='Operation and denominator', font=("Helvetica", 13, "bold")).grid(row=2, column=0, sticky=W, pady=(15,0))
ops = OptionMenu(editor_buttons, opp, *operations)
ops.grid(row=3, column=0, sticky=W)
opp.trace("w", op_callback)
# DF2 option for editor
# Denominator data: 'Self' or any stored interrogation.
tups = tuple(['Self'] + [i for i in list(all_interrogations.keys())])
data2_pick = StringVar(root)
data2_pick.set('Self')
#Label(tab2, text='Denominator:').grid(row=3, column=0, sticky=W)
dataframe2s = OptionMenu(editor_buttons, data2_pick, *tups)
dataframe2s.config(state=DISABLED, width=16)
dataframe2s.grid(row=3, column=0, columnspan=2, sticky='NW', padx=(110,0))
data2_pick.trace("w", df2_callback)
# DF2 branch selection
df2branch = StringVar(root)
df2branch.set('totals')
df2box = OptionMenu(editor_buttons, df2branch, 'results', 'totals')
df2box.config(state=DISABLED, width=11)
df2box.grid(row=3, column=1, sticky=E)
# sort by
Label(editor_buttons, text='Sort results by', font=("Helvetica", 13, "bold")).grid(row=4, column=0, sticky=W, pady=(15,0))
sort_val = StringVar(root)
sort_val.set('None')
poss = ['None', 'Total', 'Inverse total', 'Name','Increase',
        'Decrease', 'Static', 'Turbulent', 'P value', 'Reverse']
sorts = OptionMenu(editor_buttons, sort_val, *poss)
sorts.config(width=11)
sorts.grid(row=4, column=1, sticky=E, pady=(15,0))
# spelling again
Label(editor_buttons, text='Spelling:').grid(row=5, column=0, sticky=W, pady=(15,0))
spl_editor = MyOptionMenu(editor_buttons, 'Off','UK','US')
spl_editor.grid(row=5, column=1, sticky=E, pady=(15,0))
spl_editor.configure(width=10)
# keep_top
# 'all' or an integer: how many top results to retain after editing.
Label(editor_buttons, text='Keep top results:').grid(row=6, column=0, sticky=W)
keeptopnum = StringVar()
keeptopnum.set('all')
keeptopbox = Entry(editor_buttons, textvariable=keeptopnum, width=5)
keeptopbox.grid(column=1, row=6, sticky=E)
all_text_widgets.append(keeptopbox)
# currently broken: just totals button
just_tot_setting = IntVar()
just_tot_but = Checkbutton(editor_buttons, text="Just totals", variable=just_tot_setting, state=DISABLED)
#just_tot_but.select()
just_tot_but.grid(column=0, row=7, sticky=W)
keep_stats_setting = IntVar()
keep_stat_but = Checkbutton(editor_buttons, text="Keep stats", variable=keep_stats_setting)
#keep_stat_but.select()
keep_stat_but.grid(column=1, row=7, sticky=E)
rem_abv_p_set = IntVar()
rem_abv_p_but = Checkbutton(editor_buttons, text="Remove above p", variable=rem_abv_p_set)
#rem_abv_p_but.select()
rem_abv_p_but.grid(column=0, row=8, sticky=W)
# transpose
transpose = IntVar()
trans_but = Checkbutton(editor_buttons, text="Transpose", variable=transpose, onvalue=True, offvalue=False)
trans_but.grid(column=1, row=8, sticky=E)
# entries + entry field for regex, off, skip, keep, merge
Label(editor_buttons, text='Edit entries', font=("Helvetica", 13, "bold")).grid(row=9, column=0, sticky=W, pady=(15, 0))
# edit entries regex box
# Regex matching the entries to skip/keep/merge; toggled by do_w_callback.
entry_regex = StringVar()
entry_regex.set(r'.*ing$')
edit_box = Entry(editor_buttons, textvariable=entry_regex, width=23, state=DISABLED, font=("Courier New", 13))
edit_box.grid(row=10, column=1, sticky=E)
all_text_widgets.append(edit_box)
# merge entries newname
Label(editor_buttons, text='Merge name:').grid(row=11, column=0, sticky=W)
newname_var = StringVar()
newname_var.set('')
mergen = Entry(editor_buttons, textvariable=newname_var, width=23, state=DISABLED, font=("Courier New", 13))
mergen.grid(row=11, column=1, sticky=E)
all_text_widgets.append(mergen)
# find/replace applied to entry names
Label(editor_buttons, text='Replace in entry names:').grid(row=12, column=0, sticky=W)
Label(editor_buttons, text='Replace with:').grid(row=12, column=1, sticky=W)
toreplace_string = StringVar()
toreplace_string.set('')
replacewith_string = StringVar()
replacewith_string.set('')
toreplace = Entry(editor_buttons, textvariable=toreplace_string, font=("Courier New", 13))
toreplace.grid(row=13, column=0, sticky=W)
all_text_widgets.append(toreplace)
replacewith = Entry(editor_buttons, textvariable=replacewith_string, font=("Courier New", 13), width=23)
replacewith.grid(row=13, column=1, sticky=E)
all_text_widgets.append(replacewith)
def do_w_callback(*args):
    """Enable the entry-editing fields only when they apply.

    The regex box is usable in any mode except 'Off'; the merge-name
    box is usable only in 'Merge' mode.
    """
    mode = do_with_entries.get()
    edit_box.configure(state=DISABLED if mode == 'Off' else NORMAL)
    mergen.configure(state=NORMAL if mode == 'Merge' else DISABLED)
# options for editing entries
# What to do with entries matched by the regex box.
do_with_entries = StringVar(root)
do_with_entries.set('Off')
edit_ent_op = ('Off', 'Skip', 'Keep', 'Merge')
ed_op = OptionMenu(editor_buttons, do_with_entries, *edit_ent_op)
ed_op.grid(row=10, column=0, sticky=W)
# Enable/disable the regex and merge-name fields to match the mode.
do_with_entries.trace("w", do_w_callback)
def onselect_subc(evt):
    """Record the subcorpora currently highlighted in the listbox.

    Repopulates the shared ``subc_sel_vals`` list in place (other code
    holds a reference to it) with the values selected in the widget
    that fired the event. The values are consumed later by do_edit.

    :param evt: Tkinter ``<<ListboxSelect>>`` event; ``evt.widget`` is
        the subcorpus listbox.
    """
    # BUG FIX: the original iterated subc_sel_vals while popping from it
    # (`for i in subc_sel_vals: subc_sel_vals.pop()`), which skips every
    # other element and leaves roughly half the stale entries behind.
    # Clear it in place instead, keeping the shared list object.
    del subc_sel_vals[:]
    wx = evt.widget
    for index in wx.curselection():
        value = wx.get(index)
        # curselection() indices are unique, but guard against duplicate
        # display values all the same (matches the original behaviour).
        if value not in subc_sel_vals:
            subc_sel_vals.append(value)
def do_s_callback(*args):
    """Toggle the subcorpus merge-name field with the selected mode.

    Only 'Merge' needs a new-name entry, so the field is locked for
    every other mode. (The original also had a branch toggling the
    subcorpus listbox state, deliberately commented out — the listbox
    stays interactive in every mode.)
    """
    merge_mode = do_sub.get() == 'Merge'
    merge.configure(state=NORMAL if merge_mode else DISABLED)
# subcorpora + optionmenu off, skip, keep
Label(editor_buttons, text='Edit subcorpora', font=("Helvetica", 13, "bold")).grid(row=14, column=0, sticky=W, pady=(15,0))
edit_sub_f = Frame(editor_buttons)
edit_sub_f.grid(row=14, column=1, rowspan = 5, sticky=E, pady=(20,0))
edsub_scbr = Scrollbar(edit_sub_f)
edsub_scbr.pack(side=RIGHT, fill=Y)
# Multi-select list of subcorpora; selections recorded by onselect_subc.
subc_listbox = Listbox(edit_sub_f, selectmode = EXTENDED, height=5, relief=SUNKEN, bg='#F4F4F4',
                       yscrollcommand=edsub_scbr.set, exportselection=False)
subc_listbox.pack(fill=BOTH)
edsub_scbr.config(command=subc_listbox.yview)
xx = subc_listbox.bind('<<ListboxSelect>>', onselect_subc)
subc_listbox.select_set(0)
# subcorpora edit options
do_sub = StringVar(root)
do_sub.set('Off')
do_with_subc = OptionMenu(editor_buttons, do_sub, *('Off', 'Skip', 'Keep', 'Merge', 'Span'))
do_with_subc.grid(row=15, column=0, sticky=W)
do_sub.trace("w", do_s_callback)
# subcorpora merge name
Label(editor_buttons, text='Merge name:').grid(row=16, column=0, sticky='NW')
new_subc_name = StringVar()
new_subc_name.set('')
merge = Entry(editor_buttons, textvariable=new_subc_name, state=DISABLED, font=("Courier New", 13))
merge.grid(row=17, column=0, sticky='SW', pady=(0, 10))
all_text_widgets.append(merge)
# name the edit
# Name under which the edited interrogation is stored in the master dict.
edit_nametext=StringVar()
edit_nametext.set('untitled')
Label(editor_buttons, text='Edit name', font=("Helvetica", 13, "bold")).grid(row=19, column=0, sticky=W)
msn = Entry(editor_buttons, textvariable=edit_nametext, width=18)
msn.grid(row=20, column=0, sticky=W)
all_text_widgets.append(msn)
# edit button
# Disabled until a result is selected (see df_callback).
edbut = Button(editor_buttons, text='Edit')
edbut.config(command=lambda: runner(edbut, do_editing), state=DISABLED)
edbut.grid(row=20, column=1, sticky=E)
def editor_spreadsheet_showing_something(*args):
    """Allow the update button only while an editor spreadsheet is loaded."""
    showing = bool(name_of_o_ed_spread.get())
    upd_ed_but.config(state=NORMAL if showing else DISABLED)
# show spreadsheets
e_wdth = int(note_width * 0.55)
editor_sheets = Frame(tab2)
editor_sheets.grid(column=1, row=0, sticky='NE')
# 'Original' (pre-edit) pane: name label plus results/totals frames.
resultname = StringVar()
name_of_o_ed_spread = StringVar()
name_of_o_ed_spread.set('')
name_of_o_ed_spread.trace("w", editor_spreadsheet_showing_something)
resultname.set('Results to edit: %s' % str(name_of_o_ed_spread.get()))
o_editor_results = Frame(editor_sheets, height=28, width=20)
o_editor_results.grid(column=1, row=1, rowspan=1, padx=(20, 0), sticky=N)
Label(editor_sheets, textvariable=resultname,
      font=("Helvetica", 13, "bold")).grid(row=0,
      column=1, sticky='NW', padx=(20,0))
#Label(editor_sheets, text='Totals to edit:',
#font=("Helvetica", 13, "bold")).grid(row=4,
#column=1, sticky=W, pady=0)
o_editor_totals = Frame(editor_sheets, height=1, width=20)
o_editor_totals.grid(column=1, row=1, rowspan=1, padx=(20,0), sticky=N, pady=(220,0))
update_spreadsheet(o_editor_results, df_to_show=None, height=160, width=e_wdth)
update_spreadsheet(o_editor_totals, df_to_show=None, height=10, width=e_wdth)
# 'Edited' (post-edit) pane, stacked below via pady offsets in the same cell.
editoname = StringVar()
name_of_n_ed_spread = StringVar()
name_of_n_ed_spread.set('')
editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
Label(editor_sheets, textvariable=editoname,
      font=("Helvetica", 13, "bold")).grid(row=1,
      column=1, sticky='NW', padx=(20,0), pady=(290,0))
n_editor_results = Frame(editor_sheets, height=28, width=20)
n_editor_results.grid(column=1, row=1, rowspan=1, sticky=N, padx=(20,0), pady=(310,0))
#Label(editor_sheets, text='Edited totals:',
#font=("Helvetica", 13, "bold")).grid(row=15,
#column=1, sticky=W, padx=20, pady=0)
n_editor_totals = Frame(editor_sheets, height=1, width=20)
n_editor_totals.grid(column=1, row=1, rowspan=1, padx=(20,0), pady=(500,0))
update_spreadsheet(n_editor_results, df_to_show=None, height=160, width=e_wdth)
update_spreadsheet(n_editor_totals, df_to_show=None, height=10, width=e_wdth)
# add button to update
# Pushes hand-edited spreadsheet cells back into the stored interrogations.
upd_ed_but = Button(editor_sheets, text='Update interrogation(s)', command=lambda: update_all_interrogations(pane='edit'))
if not small_screen:
    upd_ed_but.grid(row=1, column=1, sticky=E, padx=(0, 40), pady=(594, 0))
else:
    upd_ed_but.grid(row=0, column=1, sticky='NE', padx=(20,0))
upd_ed_but.config(state=DISABLED)
################# ################# ################# #################
# VISUALISE TAB # # VISUALISE TAB # # VISUALISE TAB # # VISUALISE TAB #
################# ################# ################# #################
plot_option_frame = Frame(tab3)
plot_option_frame.grid(row=0, column=0, sticky='NW')
def do_plotting():
    """Handle the Plot button: collect kwargs from the visualise-tab
    widgets, draw the figure via corpkit.plotter, and embed it (plus a
    navigation toolbar) in tab3."""
    plotbut.config(state=DISABLED)
    # junk for showing the plot in tkinter
    for i in oldplotframe:
        i.destroy()
    import matplotlib
    matplotlib.use('TkAgg')
    #from numpy import arange, sin, pi
    from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
    # implement the default mpl key bindings
    from matplotlib.backend_bases import key_press_handler
    from matplotlib.figure import Figure
    from corpkit.plotter import plotter
    # bail out early when nothing is selected
    if data_to_plot.get() == 'None':
        timestring('No data selected to plot.')
        return
    # pick the branch (results/totals) of the selected interrogation
    if plotbranch.get() == 'results':
        if all_interrogations[data_to_plot.get()].results is None:
            timestring('No results branch to plot.')
            return
        what_to_plot = all_interrogations[data_to_plot.get()].results
    elif plotbranch.get() == 'totals':
        if all_interrogations[data_to_plot.get()].totals is None:
            timestring('No totals branch to plot.')
            return
        what_to_plot = all_interrogations[data_to_plot.get()].totals
    # narrow to a single entry (column) or a single subcorpus (row)
    if single_entry.get() != 'All':
        what_to_plot = what_to_plot[single_entry.get()]
    if single_sbcp.get() != 'All':
        # NOTE(review): .ix is removed in modern pandas — presumably this
        # targets an old pandas; confirm before upgrading dependencies.
        what_to_plot = what_to_plot.ix[single_sbcp.get()]
    if transpose_vis.get():
        if plotbranch.get() != 'totals':
            what_to_plot = what_to_plot.T
    # determine num to plot
    def determine_num_to_plot(num):
        """translate num to num_to_plot: an int, 'all', or fallback 7"""
        try:
            num = int(num)
        except:
            if num.lower() == 'all':
                num = 'all'
            else:
                # unparseable input: reset the widget to the default too
                num = 7
                number_to_plot.set('7')
        return num
    num = determine_num_to_plot(number_to_plot.get())
    the_kind = charttype.get()
    if the_kind == 'Type of chart':
        the_kind = 'line'
    # plotter options
    d = {'num_to_plot': num,
         'kind': the_kind,
         'indices': False}
    if the_kind == 'heatmap':
        d['robust'] = True
    #the_style =
    #if the_style == 'matplotlib':
    #lgd = plt.legend(handles[: the_style = False
    d['style'] = plot_style.get()
    # explode option
    if explbox.get() != '' and charttype.get() == 'pie':
        # a bracketed, comma-separated entry becomes a list of names
        if explbox.get().startswith('[') and explbox.get().endswith(']') and ',' in explbox.get():
            explval = explbox.get().lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
        else:
            explval = explbox.get().strip()
        explval = remake_special(explval, customs=custom_special_dict,
                                 case_sensitive=case_sensitive.get())
        d['explode'] = explval
    # this code is ridiculous
    d['tex'] = bool(texuse.get())
    d['black_and_white'] = bool(bw.get())
    d['reverse_legend'] = bool(rl.get())
    d['subplots'] = bool(sbplt.get())
    if bool(sbplt.get()):
        d['layout'] = (int(lay1.get()), int(lay2.get()))
    d['grid'] = bool(gridv.get())
    d['stacked'] = bool(stackd.get())
    d['partial_pie'] = bool(part_pie.get())
    d['filled'] = bool(filledvar.get())
    d['logx'] = bool(log_x.get())
    d['logy'] = bool(log_y.get())
    # empty label box -> let plotter decide; literal 'None' -> suppress label
    if x_axis_l.get() != '':
        d['x_label'] = x_axis_l.get()
    if x_axis_l.get() == 'None':
        d['x_label'] = False
    if y_axis_l.get() != '':
        d['y_label'] = y_axis_l.get()
    if y_axis_l.get() == 'None':
        d['y_label'] = False
    d['cumulative'] = bool(cumul.get())
    d['colours'] = chart_cols.get()
    legend_loc = legloc.get()
    if legend_loc == 'none':
        d['legend'] = False
    else:
        d['legend_pos'] = legend_loc
    if showtot.get() == 'legend + plot':
        d['show_totals'] = 'both'
    else:
        d['show_totals'] = showtot.get()
    d['figsize'] = (int(figsiz1.get()), int(figsiz2.get()))
    # a one-row frame is plotted as a single series
    if len(what_to_plot.index) == 1:
        what_to_plot = what_to_plot.ix[what_to_plot.index[0]]
    if debug:
        print('Plotter args:', what_to_plot, plotnametext.get(), d)
    f = plotter(what_to_plot, plotnametext.get(), **d)
    # latex error
    #except RuntimeError as e:
    # s = str(e)
    # print(s)
    # split_report = s.strip().split('Here is the full report generated by LaTeX:')
    # try:
    # if len(split_report) > 0 and split_report[1] != '':
    # timestring('LaTeX error: %s' % split_report[1])
    # except:
    # timestring('LaTeX error: %s' % split_report)
    # else:
    # timestring('No TeX distribution found. Disabling TeX option.')
    # texuse.set(0)
    # tbut.config(state=DISABLED)
    #
    # return
    timestring('%s plotted.' % plotnametext.get())
    del oldplotframe[:]
    def getScrollingCanvas(frame):
        """
        Adds a new canvas with scroll bars to the argument frame
        NB: uses grid layout
        return: the newly created canvas
        """
        frame.grid(column=1, row=0, rowspan = 1, padx=(15, 15), pady=(40, 0), columnspan=3, sticky='NW')
        #frame.rowconfigure(0, weight=9)
        #frame.columnconfigure(0, weight=9)
        fig_frame_height = 440 if small_screen else 500
        canvas = Canvas(frame, width=980, height=fig_frame_height)
        xScrollbar = Scrollbar(frame, orient=HORIZONTAL)
        yScrollbar = Scrollbar(frame)
        xScrollbar.pack(side=BOTTOM,fill=X)
        yScrollbar.pack(side=RIGHT,fill=Y)
        canvas.config(xscrollcommand=xScrollbar.set)
        xScrollbar.config(command=canvas.xview)
        canvas.config(yscrollcommand=yScrollbar.set)
        yScrollbar.config(command=canvas.yview)
        canvas.pack(side=LEFT,expand=True,fill=BOTH)
        return canvas
    frame_for_fig = Frame(tab3)
    #frame_for_fig
    scrollC = getScrollingCanvas(frame_for_fig)
    # embed the matplotlib figure in the scrollable canvas
    mplCanvas = FigureCanvasTkAgg(f.gcf(), frame_for_fig)
    mplCanvas._tkcanvas.config(highlightthickness=0)
    canvas = mplCanvas.get_tk_widget()
    canvas.pack()
    if frame_for_fig not in boxes:
        boxes.append(frame_for_fig)
    scrollC.create_window(0, 0, window=canvas)
    scrollC.config(scrollregion=scrollC.bbox(ALL))
    #hbar=Scrollbar(frame_for_fig,orient=HORIZONTAL)
    #hbar.pack(side=BOTTOM,fill=X)
    #hbar.config(command=canvas.get_tk_widget().xview)
    #vbar=Scrollbar(frame_for_fig,orient=VERTICAL)
    #vbar.pack(side=RIGHT,fill=Y)
    #vbar.config(command=canvas.get_tk_widget().yview)
    ##canvas.config(width=300,height=300)
    #canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    #canvas.pack(side=LEFT,expand=True,fill=BOTH)
    try:
        mplCanvas.show()
    except RuntimeError as e:
        # a RuntimeError here is typically a LaTeX failure from tex=True
        s = str(e)
        print(s)
        split_report = s.strip().split('Here is the full report generated by LaTeX:')
        if len(split_report) > 0 and split_report[1] != '':
            timestring('LaTeX error: %s' % split_report[1])
        else:
            timestring('No TeX distribution found. Disabling TeX option.')
            texuse.set(0)
            tbut.config(state=DISABLED)
        return
    oldplotframe.append(mplCanvas.get_tk_widget())
    del thefig[:]
    # navigation toolbar, floated over tab3 below the figure
    toolbar_frame = Frame(tab3, borderwidth=0)
    toolbar_frame.grid(row=0, column=1, columnspan=3, sticky='NW', padx=(400,0), pady=(600,0))
    toolbar_frame.lift()
    oldplotframe.append(toolbar_frame)
    toolbar = NavigationToolbar2TkAgg(mplCanvas,toolbar_frame)
    toolbar.update()
    thefig.append(f.gcf())
    savedplot.set('Saved image: ')
# Gallery state shared with move(): maps image filename -> PhotoImage
# (references kept so Tkinter does not garbage-collect them), plus the
# 'the_current_fig' cursor (-1 means no image viewed yet).
images = {'the_current_fig': -1}
def move(direction='forward'):
    """Step through saved images in the images folder and display the
    next/previous one, resized to fit a 1000x500 viewing area.

    :param direction: 'forward' for the next image, anything else for
        the previous one.
    """
    import os
    try:
        from PIL import Image
        from PIL import ImageTk
    except ImportError:
        timestring("You need PIL/Pillow installed to do this.")
        return
    # clear whatever was previously displayed
    for i in oldplotframe:
        i.destroy()
    del oldplotframe[:]
    # maybe sort by date added?
    image_list = [i for i in all_images]
    if len(image_list) == 0:
        timestring('No images found in images folder.')
        return
    # figure out where we're up to
    if images['the_current_fig'] != -1:
        ind = image_list.index(images['the_current_fig'])
    else:
        ind = -1
    if direction == 'forward':
        newind = ind + 1
    else:
        newind = ind - 1
    # toggle prev/next buttons at the ends of the list
    if newind < 1:
        pbut.configure(state=DISABLED)
    else:
        pbut.configure(state=NORMAL)
    if newind + 1 == len(image_list):
        nbut.configure(state=DISABLED)
    else:
        nbut.configure(state=NORMAL)
    imf = image_list[newind]
    if not imf.endswith('.png'):
        imf = imf + '.png'
    image = Image.open(os.path.join(image_fullpath.get(), imf))
    image_to_measure = ImageTk.PhotoImage(image)
    old_height=image_to_measure.height()
    old_width=image_to_measure.width()
    def determine_new_dimensions(height, width):
        """Scale (height, width) to fit a 1000x500 box, preserving
        aspect ratio; a square image fills the whole box."""
        maxh = 500
        maxw = 1000
        diff = float(height) / float(width)
        if diff > 1:
            # make height max
            newh = maxh
            # figure out level of magnification
            prop = maxh / float(height)
            neww = width * prop
        elif diff < 1:
            neww = maxw
            prop = maxw / float(width)
            newh = height * prop
        elif diff == 1:
            newh = maxh
            neww = maxw
        return (int(neww), int(newh))
    # calculate new dimensions
    newdimensions = determine_new_dimensions(old_height, old_width)
    # determine left padding needed to centre the image
    padxright = 20
    if newdimensions[0] != 1000:
        padxleft = ((1000 - newdimensions[0]) / 2) + 40
    else:
        padxleft = 40
    padytop = (500 - newdimensions[1]) / 2
    def makezero(n):
        """Clamp negative padding to zero."""
        if n < 0:
            return 0
        else:
            return n
    padxright = makezero(padxright)
    padxleft = makezero(padxleft)
    padytop = makezero(padytop)
    image = image.resize(newdimensions)
    image = ImageTk.PhotoImage(image)
    frm = Frame(tab3, height=500, width=1000)
    frm.grid(column=1, row=0, rowspan = 1, padx=(padxleft, padxright), \
             pady=padytop, columnspan=3)
    gallframe = Label(frm, image = image, justify=CENTER)
    gallframe.pack(anchor='center', fill=BOTH)
    oldplotframe.append(frm)
    # keep a reference so the PhotoImage isn't garbage-collected
    images[image_list[newind]] = image
    images['the_current_fig'] = image_list[newind]
    savedplot.set('Saved image: %s' % os.path.splitext(image_list[newind])[0])
    timestring('Viewing %s' % os.path.splitext(image_list[newind])[0])
# Label + prev/next buttons for browsing saved images via move().
savedplot = StringVar()
savedplot.set('View saved images: ')
tmp = Label(tab3, textvariable=savedplot, font=("Helvetica", 13, "bold"))
padding = 555 if small_screen else 616
tmp.grid(row=0, column=1, padx=(40,0), pady=(padding-50,0), sticky=W)
pbut = Button(tab3, text='Previous', command=lambda: move(direction='back'))
pbut.grid(row=0, column=1, padx=(40,0), pady=(padding, 0), sticky=W)
pbut.config(state=DISABLED)
nbut = Button(tab3, text='Next', command=lambda: move(direction = 'forward'))
nbut.grid(row=0, column=1, padx=(160,0), pady=(padding, 0), sticky=W)
nbut.config(state=DISABLED)
# not in use while using the toolbar instead...
#def save_current_image():
# import os
# # figre out filename
# filename = namer(plotnametext.get(), type_of_data = 'image') + '.png'
# import sys
# defaultextension = '.png' if sys.platform == 'darwin' else ''
# kwarg = {'defaultextension': defaultextension,
# #'filetypes': [('all files', '.*'),
# #('png file', '.png')],
# 'initialfile': filename}
# imagedir = image_fullpath.get()
# if imagedir:
# kwarg['initialdir'] = imagedir
# fo = tkFileDialog.asksaveasfilename(**kwarg)
# if fo is None: # asksaveasfile return `None` if dialog closed with "cancel".
# return
# thefig[0].savefig(os.path.join(image_fullpath.get(), fo))
# timestring('%s saved to %s.' % (fo, image_fullpath.get()))
# title tab
# Figure title; auto-filled by the single entry/subcorpus callbacks.
Label(plot_option_frame, text='Image title:').grid(row=0, column=0, sticky='W', pady=(10, 0))
plotnametext=StringVar()
plotnametext.set('Untitled')
image_title_entry = Entry(plot_option_frame, textvariable=plotnametext)
image_title_entry.grid(row=0, column=1, pady=(10, 0))
all_text_widgets.append(image_title_entry)
def plot_callback(*args):
    """Enable/disable plot controls based on the selected dataset, and
    rebuild the single-entry and single-subcorpus option menus from it."""
    if data_to_plot.get() == 'None':
        plotbut.config(state=DISABLED)
    else:
        plotbut.config(state=NORMAL)
    try:
        thisdata = all_interrogations[data_to_plot.get()]
    except KeyError:
        return
    # reset filters whenever the dataset changes
    single_entry.set('All')
    single_sbcp.set('All')
    # subcorpora come from the corpus directory layout, not the data
    subdrs = sorted(set([d for d in os.listdir(corpus_fullpath.get()) \
                    if os.path.isdir(os.path.join(corpus_fullpath.get(),d))]))
    # rebuild the subcorpus menu in place
    single_sbcp_optmenu.config(state=NORMAL)
    single_sbcp_optmenu['menu'].delete(0, 'end')
    single_sbcp_optmenu['menu'].add_command(label='All', command=_setit(single_sbcp, 'All'))
    lst = []
    if len(subdrs) > 0:
        for c in subdrs:
            lst.append(c)
            single_sbcp_optmenu['menu'].add_command(label=c, command=_setit(single_sbcp, c))
        single_entry_or_subcorpus['subcorpora'] = lst
    else:
        # no subcorpora: leave only 'All' and lock the menu
        single_sbcp_optmenu.config(state=NORMAL)
        single_sbcp_optmenu['menu'].delete(0, 'end')
        single_sbcp_optmenu['menu'].add_command(label='All', command=_setit(single_sbcp, 'All'))
        single_sbcp_optmenu.config(state=DISABLED)
    if thisdata.results is not None:
        # rebuild the entry menu from the first 200 result columns
        plotbox.config(state=NORMAL)
        single_ent_optmenu.config(state=NORMAL)
        single_ent_optmenu['menu'].delete(0, 'end')
        single_ent_optmenu['menu'].add_command(label='All', command=_setit(single_entry, 'All'))
        lst = []
        for corp in list(thisdata.results.columns)[:200]:
            lst.append(corp)
            single_ent_optmenu['menu'].add_command(label=corp, command=_setit(single_entry, corp))
        single_entry_or_subcorpus['entries'] = lst
    else:
        # totals-only data: lock the entry menu and force the totals branch
        single_ent_optmenu.config(state=NORMAL)
        single_ent_optmenu['menu'].delete(0, 'end')
        single_ent_optmenu['menu'].add_command(label='All', command=_setit(single_entry, 'All'))
        single_ent_optmenu.config(state=DISABLED)
        plotbox.config(state=NORMAL)
        plotbranch.set('totals')
        plotbox.config(state=DISABLED)
Label(plot_option_frame, text='Data to plot:').grid(row=1, column=0, sticky=W)
# select result to plot
data_to_plot = StringVar(root)
most_recent = all_interrogations[list(all_interrogations.keys())[-1]]
# NOTE(review): this stores the interrogation *object* in the StringVar,
# whereas the menu entries and plot_callback lookups use the dict keys —
# looks like it should be the last key; confirm before changing.
data_to_plot.set(most_recent)
every_interrogation = OptionMenu(plot_option_frame, data_to_plot, *tuple([i for i in list(all_interrogations.keys())]))
every_interrogation.config(width=20)
every_interrogation.grid(column=0, row=2, sticky=W, columnspan=2)
data_to_plot.trace("w", plot_callback)
# single-entry filter, populated by plot_callback
Label(plot_option_frame, text='Entry:').grid(row=3, column=0, sticky=W)
single_entry = StringVar(root)
single_entry.set('All')
#most_recent = all_interrogations[all_interrogations.keys()[-1]]
#single_entry.set(most_recent)
single_ent_optmenu = OptionMenu(plot_option_frame, single_entry, *tuple(['']))
single_ent_optmenu.config(width=20, state=DISABLED)
single_ent_optmenu.grid(column=1, row=3, sticky=E)
def single_entry_plot_callback(*args):
    """Adjust plot options when a single entry is (de)selected.

    A single entry cannot be subplotted and always shows exactly one
    result, so those controls are reset and locked; choosing 'All'
    restores the defaults.
    """
    chosen = single_entry.get()
    if chosen == 'All':
        # Back to defaults: title, subplots and count become editable again.
        plotnametext.set('Untitled')
        sbpl_but.config(state=NORMAL)
        number_to_plot.set('7')
        num_to_plot_box.config(state=NORMAL)
        single_sbcp_optmenu.config(state=NORMAL)
        return
    # Untick subplots (the widget must be NORMAL to change it), then lock.
    sbpl_but.config(state=NORMAL)
    sbplt.set(0)
    sbpl_but.config(state=DISABLED)
    # Force the result count to one and lock it.
    num_to_plot_box.config(state=NORMAL)
    number_to_plot.set('1')
    num_to_plot_box.config(state=DISABLED)
    # Entry and subcorpus filters are mutually exclusive.
    single_sbcp_optmenu.config(state=DISABLED)
    # Auto-title with the entry name unless the user typed a custom title.
    known = single_entry_or_subcorpus['entries']
    current_title = plotnametext.get()
    if current_title == 'Untitled' or current_title in known:
        plotnametext.set(chosen)
single_entry.trace("w", single_entry_plot_callback)
# single-subcorpus filter, populated by plot_callback
Label(plot_option_frame, text='Subcorpus:').grid(row=4, column=0, sticky=W)
single_sbcp = StringVar(root)
single_sbcp.set('All')
#most_recent = all_interrogations[all_interrogations.keys()[-1]]
#single_sbcp.set(most_recent)
single_sbcp_optmenu = OptionMenu(plot_option_frame, single_sbcp, *tuple(['']))
single_sbcp_optmenu.config(width=20, state=DISABLED)
single_sbcp_optmenu.grid(column=1, row=4, sticky=E)
def single_sbcp_plot_callback(*args):
    """Adjust plot options when a single subcorpus is (de)selected.

    A single subcorpus is best shown as a bar chart without subplots;
    choosing 'All' restores line charts and the default controls.
    """
    chosen = single_sbcp.get()
    if chosen == 'All':
        plotnametext.set('Untitled')
        sbpl_but.config(state=NORMAL)
        #number_to_plot.set('7')
        num_to_plot_box.config(state=NORMAL)
        single_ent_optmenu.config(state=NORMAL)
        charttype.set('line')
        return
    # Untick subplots (widget must be NORMAL to change it), then lock.
    sbpl_but.config(state=NORMAL)
    sbplt.set(0)
    sbpl_but.config(state=DISABLED)
    num_to_plot_box.config(state=NORMAL)
    #number_to_plot.set('1')
    #num_to_plot_box.config(state=DISABLED)
    # Subcorpus and entry filters are mutually exclusive.
    single_ent_optmenu.config(state=DISABLED)
    charttype.set('bar')
    # Auto-title with the subcorpus name unless a custom title was typed.
    known = single_entry_or_subcorpus['subcorpora']
    current_title = plotnametext.get()
    if current_title == 'Untitled' or current_title in known:
        plotnametext.set(chosen)
single_sbcp.trace("w", single_sbcp_plot_callback)
# branch selection
# Which branch of the interrogation ('results'/'totals') gets plotted.
plotbranch = StringVar(root)
plotbranch.set('results')
plotbox = OptionMenu(plot_option_frame, plotbranch, 'results', 'totals')
#plotbox.config(state=DISABLED)
plotbox.grid(row=2, column=0, sticky=E, columnspan=2)
def plotbranch_callback(*args):
    """Lock per-entry/per-subcorpus options when plotting totals.

    A totals branch is a single series, so entry/subcorpus filters,
    subplots and transposition do not apply; switching back to
    results re-enables them all.
    """
    if plotbranch.get() == 'totals':
        single_sbcp_optmenu.config(state=DISABLED)
        single_ent_optmenu.config(state=DISABLED)
        # Untick then lock: a checkbutton value only changes while NORMAL.
        for button, variable in ((sbpl_but, sbplt),
                                 (trans_but_vis, transpose_vis)):
            button.config(state=NORMAL)
            variable.set(0)
            button.config(state=DISABLED)
    else:
        for widget in (single_sbcp_optmenu, single_ent_optmenu,
                       sbpl_but, trans_but_vis):
            widget.config(state=NORMAL)
plotbranch.trace('w', plotbranch_callback)
# num_to_plot
# Integer or 'all'; parsed by determine_num_to_plot inside do_plotting.
Label(plot_option_frame, text='Results to show:').grid(row=5, column=0, sticky=W)
number_to_plot = StringVar()
number_to_plot.set('7')
num_to_plot_box = Entry(plot_option_frame, textvariable=number_to_plot, width=3)
num_to_plot_box.grid(row=5, column=1, sticky=E)
all_text_widgets.append(num_to_plot_box)
def pie_callback(*args):
    """Enable the chart-type-specific widgets for the selected kind."""
    kind = charttype.get()
    # Explode and partial-pie only make sense for pie charts.
    pie_state = NORMAL if kind == 'pie' else DISABLED
    explbox.config(state=pie_state)
    ppie_but.config(state=pie_state)
    # 'bar'/'barh' enable filled here; note the area check below
    # re-disables it again for anything that is not an area chart,
    # matching the long-standing behaviour.
    filledbut.config(state=NORMAL if kind.startswith('bar') else DISABLED)
    if kind == 'area':
        # matplotlib cannot draw area charts on a log y axis.
        logybut.deselect()
        logybut.config(state=DISABLED)
        filledbut.config(state=NORMAL)
    else:
        logybut.config(state=NORMAL)
        filledbut.config(state=DISABLED)
# chart type
Label(plot_option_frame, text='Kind of chart').grid(row=6, column=0, sticky=W)
charttype = StringVar(root)
charttype.set('line')
kinds_of_chart = ('line', 'bar', 'barh', 'pie', 'area', 'heatmap')
chart_kind = OptionMenu(plot_option_frame, charttype, *kinds_of_chart)
chart_kind.config(width=10)
chart_kind.grid(row=6, column=1, sticky=E)
# pie_callback also handles bar/area-specific widget states
charttype.trace("w", pie_callback)
# axes
# Blank = let the plotter pick a label; literal 'None' = no label.
Label(plot_option_frame, text='x axis label:').grid(row=7, column=0, sticky=W)
x_axis_l = StringVar()
x_axis_l.set('')
tmp = Entry(plot_option_frame, textvariable=x_axis_l, font=("Courier New", 14), width=18)
tmp.grid(row=7, column=1, sticky=E)
all_text_widgets.append(tmp)
Label(plot_option_frame, text='y axis label:').grid(row=8, column=0, sticky=W)
y_axis_l = StringVar()
y_axis_l.set('')
tmp = Entry(plot_option_frame, textvariable=y_axis_l, font=("Courier New", 14), width=18)
tmp.grid(row=8, column=1, sticky=E)
all_text_widgets.append(tmp)
# pie-chart explode list; enabled only for pies (see pie_callback)
tmp = Label(plot_option_frame, text='Explode:')
if not small_screen:
    tmp.grid(row=9, column=0, sticky=W)
explval = StringVar()
explval.set('')
explbox = Entry(plot_option_frame, textvariable=explval, font=("Courier New", 14), width=18)
if not small_screen:
    explbox.grid(row=9, column=1, sticky=E)
all_text_widgets.append(explbox)
explbox.config(state=DISABLED)
# log options
log_x = IntVar()
Checkbutton(plot_option_frame, text="Log x axis", variable=log_x).grid(column=0, row=10, sticky=W)
log_y = IntVar()
logybut = Checkbutton(plot_option_frame, text="Log y axis", variable=log_y, width=13)
logybut.grid(column=1, row=10, sticky=E)
# transpose
transpose_vis = IntVar()
trans_but_vis = Checkbutton(plot_option_frame, text="Transpose", variable=transpose_vis, onvalue=True, offvalue=False, width=13)
trans_but_vis.grid(column=1, row=11, sticky=E)
cumul = IntVar()
cumulbutton = Checkbutton(plot_option_frame, text="Cumulative", variable=cumul, onvalue=True, offvalue=False)
cumulbutton.grid(column=0, row=11, sticky=W)
bw = IntVar()
Checkbutton(plot_option_frame, text="Black and white", variable=bw, onvalue=True, offvalue=False).grid(column=0, row=12, sticky=W)
# TeX rendering only available when running as a script
texuse = IntVar()
tbut = Checkbutton(plot_option_frame, text="Use TeX", variable=texuse, onvalue=True, offvalue=False, width=13)
tbut.grid(column=1, row=12, sticky=E)
tbut.deselect()
if not py_script:
    tbut.config(state=DISABLED)
rl = IntVar()
Checkbutton(plot_option_frame, text="Reverse legend", variable=rl, onvalue=True, offvalue=False).grid(column=0, row=13, sticky=W)
sbplt = IntVar()
sbpl_but = Checkbutton(plot_option_frame, text="Subplots", variable=sbplt, onvalue=True, offvalue=False, width=13)
sbpl_but.grid(column=1, row=13, sticky=E)
def sbplt_callback(*args):
    """Layout menus are only meaningful while subplots are enabled."""
    layout_state = NORMAL if sbplt.get() else DISABLED
    lay1menu.config(state=layout_state)
    lay2menu.config(state=layout_state)
sbplt.trace("w", sbplt_callback)
gridv = IntVar()
gridbut = Checkbutton(plot_option_frame, text="Grid", variable=gridv, onvalue=True, offvalue=False)
# grid lines default to on
gridbut.select()
gridbut.grid(column=0, row=14, sticky=W)
stackd = IntVar()
stackbut = Checkbutton(plot_option_frame, text="Stacked", variable=stackd, onvalue=True, offvalue=False, width=13)
stackbut.grid(column=1, row=14, sticky=E)
#stackbut.config(state=DISABLED)
# partial pie: only enabled for pie charts (see pie_callback)
part_pie = IntVar()
ppie_but = Checkbutton(plot_option_frame, text="Partial pie", variable=part_pie, onvalue=True, offvalue=False)
if not small_screen:
    ppie_but.grid(column=0, row=15, sticky=W)
ppie_but.config(state=DISABLED)
# filled: only enabled for bar/area charts (see pie_callback)
filledvar = IntVar()
filledbut = Checkbutton(plot_option_frame, text="Filled", variable=filledvar, onvalue=True, offvalue=False, width=13)
if not small_screen:
    filledbut.grid(column=1, row=15, sticky=E)
filledbut.config(state=DISABLED)
# chart type
# matplotlib colormap names offered for the 'colours' plotter kwarg
Label(plot_option_frame, text='Colour scheme:').grid(row=16, column=0, sticky=W)
chart_cols = StringVar(root)
schemes = tuple(sorted(('Paired', 'Spectral', 'summer', 'Set1', 'Set2', 'Set3',
    'Dark2', 'prism', 'RdPu', 'YlGnBu', 'RdYlBu', 'gist_stern', 'cool', 'coolwarm',
    'gray', 'GnBu', 'gist_ncar', 'gist_rainbow', 'Wistia', 'CMRmap', 'bone',
    'RdYlGn', 'spring', 'terrain', 'PuBu', 'spectral', 'rainbow', 'gist_yarg',
    'BuGn', 'bwr', 'cubehelix', 'Greens', 'PRGn', 'gist_heat', 'hsv',
    'Pastel2', 'Pastel1', 'jet', 'gist_earth', 'copper', 'OrRd', 'brg',
    'gnuplot2', 'BuPu', 'Oranges', 'PiYG', 'YlGn', 'Accent', 'gist_gray', 'flag',
    'BrBG', 'Reds', 'RdGy', 'PuRd', 'Blues', 'autumn', 'ocean', 'pink', 'binary',
    'winter', 'gnuplot', 'hot', 'YlOrBr', 'seismic', 'Purples', 'RdBu', 'Greys',
    'YlOrRd', 'PuOr', 'PuBuGn', 'nipy_spectral', 'afmhot',
    'viridis', 'magma', 'plasma', 'inferno', 'diverge', 'default')))
ch_col = OptionMenu(plot_option_frame, chart_cols, *schemes)
ch_col.config(width=17)
ch_col.grid(row=16, column=1, sticky=E)
chart_cols.set('viridis')
# style: let the user pick a matplotlib style sheet.
from matplotlib import style
try:
    # BUG FIX: this previously read `tuple(stys.available)`, referencing the
    # not-yet-defined name `stys` -- the resulting NameError was swallowed by
    # a bare `except:`, so the hard-coded fallback list was ALWAYS used.
    stys = tuple(style.available)
except Exception:
    # fallback for very old matplotlib without the style module attribute
    stys = tuple(('ggplot', 'fivethirtyeight', 'bmh', 'matplotlib', \
                  'mpl-white', 'classic', 'seaborn-talk'))
plot_style = StringVar(root)
plot_style.set('ggplot')
Label(plot_option_frame, text='Plot style:').grid(row=17, column=0, sticky=W)
pick_a_style = OptionMenu(plot_option_frame, plot_style, *stys)
pick_a_style.config(width=17)
pick_a_style.grid(row=17, column=1, sticky=E)
def ps_callback(*args):
    """Seaborn styles carry their own palette, so lock the colour menu."""
    if not plot_style.get().startswith('seaborn'):
        ch_col.config(state=NORMAL)
        return
    chart_cols.set('Default')
    ch_col.config(state=DISABLED)
# react to style changes (seaborn styles disable the colour menu)
plot_style.trace("w", ps_callback)
# legend pos
Label(plot_option_frame, text='Legend position:').grid(row=18, column=0, sticky=W)
legloc = StringVar(root)
legloc.set('best')
locs = tuple(('best', 'upper right', 'right', 'lower right', 'lower left', 'upper left', 'middle', 'none'))
loc_options = OptionMenu(plot_option_frame, legloc, *locs)
loc_options.config(width=17)
loc_options.grid(row=18, column=1, sticky=E)
# figure size: two dropdowns read as "width x height" (inches)
Label(plot_option_frame, text='Figure size:').grid(row=19, column=0, sticky=W)
figsiz1 = StringVar(root)
figsiz1.set('10')
figsizes = tuple(('2', '4', '6', '8', '10', '12', '14', '16', '18'))
fig1 = OptionMenu(plot_option_frame, figsiz1, *figsizes)
fig1.configure(width=6)
fig1.grid(row=19, column=1, sticky=W, padx=(27, 0))
Label(plot_option_frame, text="x").grid(row=19, column=1, padx=(30, 0))
figsiz2 = StringVar(root)
figsiz2.set('4')
fig2 = OptionMenu(plot_option_frame, figsiz2, *figsizes)
fig2.configure(width=6)
fig2.grid(row=19, column=1, sticky=E)
# subplots layout: rows x cols grid, enabled only when 'Subplots' is ticked
Label(plot_option_frame, text='Subplot layout:').grid(row=20, column=0, sticky=W)
lay1 = StringVar(root)
lay1.set('3')
figsizes = tuple([str(i) for i in range(1, 20)])
lay1menu = OptionMenu(plot_option_frame, lay1, *figsizes)
lay1menu.configure(width=6)
lay1menu.grid(row=20, column=1, sticky=W, padx=(27, 0))
Label(plot_option_frame, text="x").grid(row=20, column=1, padx=(30, 0))
lay2 = StringVar(root)
lay2.set('3')
lay2menu = OptionMenu(plot_option_frame, lay2, *figsizes)
lay2menu.configure(width=6)
lay2menu.grid(row=20, column=1, sticky=E)
# disabled until sbplt_callback enables them
lay1menu.config(state=DISABLED)
lay2menu.config(state=DISABLED)
# show_totals option
Label(plot_option_frame, text='Show totals: ').grid(row=21, column=0, sticky=W)
showtot = StringVar(root)
showtot.set('Off')
showtot_options = tuple(('Off', 'legend', 'plot', 'legend + plot'))
show_tot_menu = OptionMenu(plot_option_frame, showtot, *showtot_options)
show_tot_menu.grid(row=21, column=1, sticky=E)
# plot button: enabled elsewhere once there is something to plot
plotbut = Button(plot_option_frame, text='Plot')
plotbut.grid(row=22, column=1, sticky=E)
plotbut.config(command=lambda: runner(plotbut, do_plotting), state=DISABLED)
################### ################### ################### ###################
# CONCORDANCE TAB # # CONCORDANCE TAB # # CONCORDANCE TAB # # CONCORDANCE TAB #
################### ################### ################### ###################
def add_conc_lines_to_window(data, loading=False, preserve_colour=True):
    """Render concordance *data* into the concordance listbox.

    Accepts a Concordance, a DataFrame (wrapped into a Concordance), or an
    object with a ``.concordance`` attribute; stores it in current_conc[0].
    Columns are shown/hidden according to the toggle IntVars, the left/mid/
    right text is padded to the chosen window width, and previously applied
    row colours are restored when preserve_colour is True.
    """
    import pandas as pd
    import re
    #pd.set_option('display.height', 1000)
    #pd.set_option('display.width', 1000)
    pd.set_option('display.max_colwidth', 200)
    import corpkit
    from corpkit.interrogation import Concordance
    # normalise the three accepted input shapes into a Concordance
    if isinstance(data, Concordance):
        current_conc[0] = data
    elif isinstance(data, pd.core.frame.DataFrame):
        data = Concordance(data)
        current_conc[0] = data
    else:
        current_conc[0] = data.concordance
        data = data.concordance
    # 'Window' is the placeholder label in the dropdown; default to 70 chars
    if win.get() == 'Window':
        window = 70
    else:
        window = int(win.get())
    fnames = show_filenames.get()
    them = show_themes.get()
    spk = show_speaker.get()
    subc = show_subcorpora.get()
    ix = show_index.get()
    # hide columns the user has toggled off
    if not fnames:
        data = data.drop('f', axis=1, errors='ignore')
    if not them:
        data = data.drop('t', axis=1, errors='ignore')
    if not spk:
        data = data.drop('s', axis=1, errors='ignore')
    if not subc:
        data = data.drop('c', axis=1, errors='ignore')
    if not ix:
        data = data.drop('i', axis=1, errors='ignore')
    # rebuild the theme column fresh from the current colour scheme
    if them:
        data = data.drop('t', axis=1, errors='ignore')
        themelist = get_list_of_themes(data)
        if any(t != '' for t in themelist):
            data.insert(0, 't', themelist)
    # only do left align when long result ...
    # removed because it's no big deal if always left aligned, and this
    # copes when people search for 'root' or something.
    def resize_by_window_size(df, window):
        # pad/truncate l, m, r columns so the listbox rows line up
        import os
        if 'f' in list(df.columns):
            df['f'] = df['f'].apply(os.path.basename)
        df['l'] = df['l'].str.slice(start=-window, stop=None)
        df['l'] = df['l'].str.rjust(window)
        df['r'] = df['r'].str.slice(start=0, stop=window)
        df['r'] = df['r'].str.ljust(window)
        df['m'] = df['m'].str.ljust(df['m'].str.len().max())
        return df
    moddata = resize_by_window_size(data, window)
    lines = moddata.to_string(header=False, index=show_df_index.get()).splitlines()
    #lines = [re.sub('\s*\.\.\.\s*$', '', s) for s in lines]
    conclistbox.delete(0, END)
    for line in lines:
        conclistbox.insert(END, line)
    if preserve_colour:
        # itemcoldict has the NUMBER and COLOUR
        index_regex = re.compile(r'^([0-9]+)')
        # make dict for NUMBER:INDEX
        index_dict = {}
        lines = conclistbox.get(0, END)
        for index, line in enumerate(lines):
            index_dict[int(re.search(index_regex, conclistbox.get(index)).group(1))] = index
        # re-colour surviving rows; forget colours for rows no longer shown
        todel = []
        for item, colour in list(itemcoldict.items()):
            try:
                conclistbox.itemconfig(index_dict[item], {'bg':colour})
            except KeyError:
                todel.append(item)
        for i in todel:
            del itemcoldict[i]
    if loading:
        timestring('Concordances loaded.')
    else:
        timestring('Concordancing done: %d results.' % len(lines))
def delete_conc_lines(*args):
    """Drop the currently selected concordance lines and redraw the window."""
    global conc_saved
    if type(current_conc[0]) == str:
        return
    selected = conclistbox.curselection()
    doomed = [current_conc[0].iloc[int(pos),].name for pos in selected]
    survivors = current_conc[0].drop(doomed)
    add_conc_lines_to_window(survivors)
    count = len(selected)
    if count == 1:
        timestring('%d line removed.' % count)
    if count > 1:
        timestring('%d lines removed.' % count)
    conc_saved = False
def delete_reverse_conc_lines(*args):
    """Keep only the selected concordance lines, discarding the rest."""
    global conc_saved
    if type(current_conc[0]) == str:
        return
    items = [int(i) for i in conclistbox.curselection()]
    # BUG FIX: count the removed lines *before* redrawing. Previously the
    # count was computed after add_conc_lines_to_window() had refilled the
    # listbox with only the kept lines, so it was always 0 and the status
    # message never appeared.
    n_removed = len(conclistbox.get(0, END)) - len(items)
    r = current_conc[0].iloc[items,]
    add_conc_lines_to_window(r)
    conclistbox.select_set(0, END)
    if n_removed == 1:
        timestring('%d line removed.' % n_removed)
    if n_removed > 1:
        timestring('%d lines removed.' % n_removed)
    conc_saved = False
def conc_export(data='default'):
    """Export concordance lines (current, or a stored set) as tab-separated text."""
    import os
    import pandas
    global conc_saved
    if type(current_conc[0]) == str:
        timestring('Nothing to export.')
        return
    if in_a_project.get() == 0:
        docpath = os.path.join(os.path.expanduser("~"), 'Documents')
    else:
        docpath = project_fullpath.get()
    source = current_conc[0] if data == 'default' else all_conc[data]
    thedata = source.to_csv(header=False, sep='\t')
    # macOS save dialogs support an explanatory message
    if sys.platform == 'darwin':
        the_kwargs = {'message': 'Choose a name and place for your exported data.'}
    else:
        the_kwargs = {}
    savepath = filedialog.asksaveasfilename(title='Save file',
                                            initialdir=exported_fullpath.get(),
                                            defaultextension='.csv',
                                            initialfile='data.csv',
                                            **the_kwargs)
    if savepath == '':
        return
    with open(savepath, "w") as fo:
        fo.write(thedata)
    timestring('Concordance lines exported.')
    conc_saved = False
def get_list_of_colours(df):
    """Return one sortable colour code per row of *df* (10 = uncoloured)."""
    number_for = {v: k for k, v in list(colourdict.items())}
    # remap codes so sorting behaves: 0 sorts with uncoloured, 9 sorts last
    corrections = {0: 10, 9: 99}
    codes = []
    for row_id in list(df.index):
        if row_id in list(itemcoldict.keys()):
            raw = number_for[itemcoldict[row_id]]
            codes.append(corrections.get(raw, raw))
        else:
            codes.append(10)
    return codes
def get_list_of_themes(df):
    """Return the scheme label entered for each row's colour ('' if none)."""
    number_for = {v: k for k, v in list(colourdict.items())}
    labels = []
    for row_id in list(df.index):
        label = ''
        if row_id in list(itemcoldict.keys()):
            colournumber = number_for[itemcoldict[row_id]]
            entered = entryboxes[list(entryboxes.keys())[colournumber]].get()
            if entered is not False and entered != '':
                label = entered
        labels.append(label)
    if all(lab == '' for lab in labels):
        timestring('Warning: no scheme defined.')
    return labels
def conc_sort(*args):
    """Sort the concordance according to the 'sort by' dropdown.

    Most modes build a temporary 'tosorton' column and sort on it; repeated
    sorts on the same key toggle ascending/descending via toggle(). Token-
    position modes (L1-L5, M1-M2, R1-R5) pad each line to six tokens and
    sort on the word at the requested offset.
    """
    import re
    import pandas
    import itertools
    sort_way = True
    if isinstance(current_conc[0], str):
        return
    if prev_sortval[0] == sortval.get():
        # if subcorpus is the same, etc, as well
        sort_way = toggle()
    df = current_conc[0]
    prev_sortval[0] = sortval.get()
    # sorting by first column is easy, so we don't need pandas
    if sortval.get() == 'M1':
        low = [l.lower() for l in df['m']]
        df['tosorton'] = low
    elif sortval.get() == 'File':
        low = [l.lower() for l in df['f']]
        df['tosorton'] = low
    elif sortval.get() == 'Colour':
        colist = get_list_of_colours(df)
        df['tosorton'] = colist
    elif sortval.get() == 'Scheme':
        themelist = get_list_of_themes(df)
        #df.insert(1, 't', themelist)
        df.insert(1, 'tosorton', themelist)
    elif sortval.get() == 'Index' or sortval.get() == 'Sort':
        # NOTE(review): DataFrame.sort() was removed in pandas 0.20+;
        # presumably sort_index() is meant here -- confirm pandas version
        df = df.sort(ascending=sort_way)
    elif sortval.get() == 'Subcorpus':
        sbs = [l.lower() for l in df['c']]
        df['tosorton'] = sbs
    elif sortval.get() == 'Random':
        import pandas
        import numpy as np
        df = df.reindex(np.random.permutation(df.index))
    elif sortval.get() == 'Speaker':
        try:
            low = [l.lower() for l in df['s']]
        except:
            timestring('No speaker information to sort by.')
            return
        df['tosorton'] = low
    # if sorting by other columns, however, it gets tough.
    else:
        td = {}
        #if 'note' in kwargs.keys():
        #    td['note'] = kwargs['note']
        #    add_nltk_data_to_nltk_path(**td)
        # tokenise the right part of each line
        # get l or r column
        col = sortval.get()[0].lower()
        tokenised = [s.split() for s in list(df[col].values)]
        # NOTE(review): 'repeats' is assigned but never used; padding below
        # always targets six tokens
        if col == 'm':
            repeats = 2
        else:
            repeats = 6
        # pad short lines so positional indexing below can't go wrong
        for line in tokenised:
            for i in range(6 - len(line)):
                if col == 'l':
                    line.insert(0, '')
                if col == 'r':
                    line.append('')
        # get 1-5 and convert it to an index into the padded token list
        num = int(sortval.get().lstrip('LMR'))
        if col == 'l':
            num = -num
        if col == 'r':
            num = num - 1
        just_sortword = []
        for l in tokenised:
            if col != 'm':
                just_sortword.append(l[num].lower())
            else:
                # horrible
                if len(l) == 1:
                    just_sortword.append(l[0].lower())
                elif len(l) > 1:
                    if num == 2:
                        just_sortword.append(l[1].lower())
                    elif num == -2:
                        just_sortword.append(l[-2].lower())
                    elif num == -1:
                        just_sortword.append(l[-1].lower())
        # append list to df
        df['tosorton'] = just_sortword
    if sortval.get() not in ['Index', 'Random', 'Sort']:
        # NOTE(review): see df.sort note above -- sort_values in new pandas
        df = df.sort(['tosorton'], ascending=sort_way)
        df = df.drop(['tosorton'], axis=1, errors='ignore')
    if show_filenames.get() == 0:
        add_conc_lines_to_window(df.drop('f', axis=1, errors='ignore'))
    else:
        add_conc_lines_to_window(df)
    timestring('%d concordance lines sorted.' % len(conclistbox.get(0, END)))
    global conc_saved
    conc_saved = False
def do_inflection(pos='v'):
    """Expand the wordlist text box with inflected forms.

    pos: 'v' adds verb inflections, 'n' adds plurals, 'a' adds
    comparative/superlative forms. Works on the current selection if there
    is one (merging the result back with the full list), otherwise on all
    words. The text box is rewritten with the sorted, de-duplicated result.
    """
    global tb
    from corpkit.dictionaries.process_types import get_both_spellings, add_verb_inflections
    # get every word
    all_words = [w.strip().lower() for w in tb.get(1.0, END).split()]
    # try to get just selection; Tk raises if there is no selection
    cursel = False
    try:
        lst = [w.strip().lower() for w in tb.get(SEL_FIRST, SEL_LAST).split()]
        cursel = True
    except:
        lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
    lst = get_both_spellings(lst)
    if pos == 'v':
        expanded = add_verb_inflections(lst)
    if pos == 'n':
        from corpkit.inflect import pluralize
        expanded = []
        for w in lst:
            expanded.append(w)
            pl = pluralize(w)
            if pl != w:
                expanded.append(pl)
    if pos == 'a':
        from corpkit.inflect import grade
        expanded = []
        for w in lst:
            expanded.append(w)
            comp = grade(w, suffix = "er")
            if comp != w:
                expanded.append(comp)
            supe = grade(w, suffix = "est")
            if supe != w:
                expanded.append(supe)
    # if only a selection was inflected, keep the unselected words too
    if cursel:
        expanded = expanded + all_words
    lst = sorted(set(expanded))
    # delete widget text, reinsert all
    tb.delete(1.0, END)
    for w in lst:
        tb.insert(END, w + '\n')
def make_dict_from_existing_wordlists():
    """Flatten corpkit's built-in wordlists into one NAME -> wordlist dict.

    Role names get an '_ROLE' suffix so they don't collide with the
    process/wordlist names.
    """
    from collections import namedtuple

    def convert(dictionary):
        return namedtuple('outputnames', list(dictionary.keys()))(**dictionary)

    from corpkit.dictionaries.process_types import processes
    from corpkit.dictionaries.roles import roles
    from corpkit.dictionaries.wordlists import wordlists
    from corpkit.other import as_regex
    customs = convert(custom_special_dict)
    presets = {}
    for source in [processes, roles, wordlists]:
        # sources are either plain objects (__dict__) or namedtuples (_asdict)
        try:
            names = [k for k in list(source.__dict__.keys())]
        except AttributeError:
            names = [k for k in list(source._asdict().keys())]
        for nm in names:
            if source == roles:
                presets[nm.upper() + '_ROLE'] = source._asdict()[nm]
                continue
            try:
                presets[nm.upper()] = source.__dict__[nm]
            except AttributeError:
                presets[nm.upper()] = source._asdict()[nm]
    return presets
# built-in (non-deletable) wordlists, merged into the custom dict under
# uppercase keys so the query language can reference them
predict = make_dict_from_existing_wordlists()
for k, v in list(predict.items()):
    custom_special_dict[k.upper()] = v
def store_wordlist():
    """Save the words currently in the text box as a named custom wordlist.

    The name is sanitised to uppercase alphanumerics/underscores; built-in
    list names are refused, and overwriting an existing custom list asks
    for confirmation. The 'previous wordlists' listbox is then refreshed.
    """
    global tb
    lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
    global schemename
    if schemename.get() == '<Enter a name>':
        timestring('Wordlist needs a name.')
        return
    # sanitise: uppercase, alphanumerics and underscore only
    specname = ''.join([i for i in schemename.get().upper() if i.isalnum() or i == '_'])
    if specname in list(predict.keys()):
        timestring('Name "%s" already taken, sorry.' % specname)
        return
    else:
        if specname in list(custom_special_dict.keys()):
            should_continue = messagebox.askyesno("Overwrite list",
                      "Overwrite existing list named '%s'?" % specname)
            if not should_continue:
                return
    custom_special_dict[specname] = lst
    global cust_spec
    # refresh the saved-lists listbox
    cust_spec.delete(0, END)
    for k, v in sorted(custom_special_dict.items()):
        cust_spec.insert(END, k)
    color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
    timestring('LIST:%s stored to custom wordlists.' % specname)
# shared Tk variables read by the parsing/tokenising machinery
parser_opts = StringVar()          # comma-joined CoreNLP annotator list
speakseg = IntVar()                # speaker segmentation on/off
parse_with_metadata = IntVar()     # treat input as XML with metadata
tokenise_pos = IntVar()            # POS-tag while tokenising
tokenise_lem = IntVar()            # lemmatise while tokenising
clicked_done = IntVar()            # set to 1 when the options popup is confirmed
clicked_done.set(0)
def parser_options(kind):
    """
    A popup with corenlp options, to display before parsing.
    this is a good candidate for 'preferences'

    kind is 'parse' (shows the CoreNLP annotator checkboxes) or
    'tokenise' (shows POS/lemma checkboxes instead). The chosen
    annotators end up comma-joined in parser_opts.
    """
    from tkinter import Toplevel
    global poptions
    poptions = Toplevel()
    poptions.title('Parser options')
    from collections import OrderedDict
    popt = OrderedDict()
    if kind == 'parse':
        # display label -> CoreNLP annotator name
        tups = [('Tokenise', 'tokenize'),
                ('Clean XML', 'cleanxml'),
                ('Sentence splitting', 'ssplit'),
                ('POS tagging', 'pos'),
                ('Lemmatisation', 'lemma'),
                ('Named entity recognition', 'ner'),
                ('Parse', 'parse'),
                ('Referent tracking', 'dcoref')]
        for k, v in tups:
            popt[k] = v
    butvar = {}
    butbut = {}
    # canonical annotator ordering required by CoreNLP
    orders = {'tokenize': 0,
              'cleanxml': 1,
              'ssplit': 2,
              'pos': 3,
              'lemma': 4,
              'ner': 5,
              'parse': 6,
              'dcoref': 7}
    for index, (k, v) in enumerate(popt.items()):
        tmp = StringVar()
        but = Checkbutton(poptions, text=k, variable=tmp, onvalue=v, offvalue=False)
        but.grid(sticky=W)
        # everything on by default except XML cleaning
        if k != 'Clean XML':
            but.select()
        else:
            but.deselect()
        butbut[index] = but
        butvar[index] = tmp
    if kind == 'tokenise':
        Checkbutton(poptions, text='POS tag', variable=tokenise_pos, onvalue=True, offvalue=False).grid(sticky=W)
        Checkbutton(poptions, text='Lemmatise', variable=tokenise_lem, onvalue=True, offvalue=False).grid(sticky=W)
    Checkbutton(poptions, text='Speaker segmentation', variable=speakseg, onvalue=True, offvalue=False).grid(sticky=W)
    Checkbutton(poptions, text='XML metadata', variable=parse_with_metadata, onvalue=True, offvalue=False).grid(sticky=W)
    def optionspicked(*args):
        # collect ticked annotators, sort into CoreNLP order, store and close
        vals = [i.get() for i in list(butvar.values()) if i.get() is not False and i.get() != 0 and i.get() != '0']
        vals = sorted(vals, key=lambda x:orders[x])
        the_opts = ','.join(vals)
        clicked_done.set(1)
        poptions.destroy()
        parser_opts.set(the_opts)
    def qut():
        # cancel without recording anything
        poptions.destroy()
    stopbut = Button(poptions, text='Cancel', command=qut)
    stopbut.grid(row=15, sticky='w', padx=5)
    stopbut = Button(poptions, text='Done', command=optionspicked)
    stopbut.grid(row=15, sticky='e', padx=5)
############## ############## ############## ############## ##############
# WORDLISTS # # WORDLISTS # # WORDLISTS # # WORDLISTS # # WORDLISTS #
############## ############## ############## ############## ##############
def custom_lists():
    """a popup for defining custom wordlists

    Builds a Toplevel with a text box for entering words (plus inflection
    helper buttons), and a listbox of previously stored lists with
    view/merge/save/remove/delete actions. Session lists live in
    custom_special_dict; 'Save' persists to disk via dump_custom_list_json.
    """
    from tkinter import Toplevel
    popup = Toplevel()
    popup.title('Custom wordlists')
    popup.wm_attributes('-topmost', 1)
    Label(popup, text='Create wordlist', font=("Helvetica", 13, "bold")).grid(column=0, row=0)
    global schemename
    schemename = StringVar()
    schemename.set('<Enter a name>')
    scheme_name_field = Entry(popup, textvariable=schemename, justify=CENTER, width=21, font=("Courier New", 13))
    #scheme_name_field.bind('<Button-1>', select_all_text)
    scheme_name_field.grid(column=0, row=5, sticky=W, padx=(7, 0))
    global tb
    # scrolling text box where the list-in-progress is typed
    custom_words = Frame(popup, width=9, height=40)
    custom_words.grid(row=1, column=0, padx=5)
    cwscrbar = Scrollbar(custom_words)
    cwscrbar.pack(side=RIGHT, fill=Y)
    tb = Text(custom_words, yscrollcommand=cwscrbar.set, relief=SUNKEN,
              bg='#F4F4F4', width=20, height=26, font=("Courier New", 13))
    cwscrbar.config(command=tb.yview)
    bind_textfuncts_to_widgets([tb, scheme_name_field])
    tb.pack(side=LEFT, fill=BOTH)
    # inflection helpers expand whatever is typed/selected in the text box
    tmp = Button(popup, text='Get verb inflections', command=lambda: do_inflection(pos = 'v'), width=17)
    tmp.grid(row=2, column=0, sticky=W, padx=(7, 0))
    tmp = Button(popup, text='Get noun inflections', command=lambda: do_inflection(pos = 'n'), width=17)
    tmp.grid(row=3, column=0, sticky=W, padx=(7, 0))
    tmp = Button(popup, text='Get adjective forms', command=lambda: do_inflection(pos = 'a'), width=17)
    tmp.grid(row=4, column=0, sticky=W, padx=(7, 0))
    #Button(text='Inflect as noun', command=lambda: do_inflection(pos = 'n')).grid()
    savebut = Button(popup, text='Store', command=store_wordlist, width=17)
    savebut.grid(row=6, column=0, sticky=W, padx=(7, 0))
    Label(popup, text='Previous wordlists', font=("Helvetica", 13, "bold")).grid(column=1, row=0, padx=15)
    # listbox of already-stored lists
    other_custom_queries = Frame(popup, width=9, height=30)
    other_custom_queries.grid(row=1, column=1, padx=15)
    pwlscrbar = Scrollbar(other_custom_queries)
    pwlscrbar.pack(side=RIGHT, fill=Y)
    global cust_spec
    cust_spec = Listbox(other_custom_queries, selectmode = EXTENDED, height=24, relief=SUNKEN, bg='#F4F4F4',
                        yscrollcommand=pwlscrbar.set, exportselection=False, width=20,
                        font=("Courier New", 13))
    pwlscrbar.config(command=cust_spec.yview)
    cust_spec.pack()
    cust_spec.delete(0, END)
    def colour_the_custom_queries(*args):
        # green = saved to disk, red = session-only
        color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
    cust_spec.bind('<<Modified>>', colour_the_custom_queries)
    for k, v in sorted(custom_special_dict.items()):
        cust_spec.insert(END, k)
    colour_the_custom_queries()
    def remove_this_custom_query():
        # forget selected lists for this session only
        global cust_spec
        indexes = cust_spec.curselection()
        for index in indexes:
            name = cust_spec.get(index)
            del custom_special_dict[name]
        cust_spec.delete(0, END)
        for k, v in sorted(custom_special_dict.items()):
            cust_spec.insert(END, k)
        color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
        if len(indexes) == 1:
            timestring('%s forgotten.' % name)
        else:
            timestring('%d lists forgotten.' % len(indexes))
    def delete_this_custom_query():
        # delete selected lists from the session AND the on-disk store;
        # built-in lists (in predict) are protected
        global cust_spec
        indexes = cust_spec.curselection()
        for index in indexes:
            name = cust_spec.get(index)
            if name in list(predict.keys()):
                timestring("%s can't be permanently deleted." % name)
                return
            del custom_special_dict[name]
            try:
                del saved_special_dict[name]
            except:
                pass
        dump_custom_list_json()
        cust_spec.delete(0, END)
        for k, v in sorted(custom_special_dict.items()):
            cust_spec.insert(END, k)
        color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
        if len(indexes) == 1:
            timestring('%s permanently deleted.' % name)
        else:
            timestring('%d lists permanently deleted.' % len(indexes))
    def show_this_custom_query(*args):
        # load one stored list into the text box for viewing/editing
        global cust_spec
        index = cust_spec.curselection()
        if len(index) > 1:
            timestring("Can only show one list at a time.")
            return
        name = cust_spec.get(index)
        tb.delete(1.0, END)
        for i in custom_special_dict[name]:
            tb.insert(END, i + '\n')
        schemename.set(name)
    cust_spec.bind('<Return>', show_this_custom_query)
    def merge_this_custom_query(*args):
        # concatenate several stored lists into the text box
        global cust_spec
        indexes = cust_spec.curselection()
        names = [cust_spec.get(i) for i in indexes]
        tb.delete(1.0, END)
        for name in names:
            for i in custom_special_dict[name]:
                tb.insert(END, i + '\n')
        schemename.set('Merged')
    def add_custom_query_to_json():
        # persist selected session lists to the project's JSON store
        global cust_spec
        indexes = cust_spec.curselection()
        for index in indexes:
            name = cust_spec.get(index)
            saved_special_dict[name] = custom_special_dict[name]
        dump_custom_list_json()
        color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
        if len(indexes) == 1:
            timestring('%s saved to file.' % name)
        else:
            timestring('%d lists saved to file.' % len(indexes))
    Button(popup, text='View/edit', command=show_this_custom_query, width=17).grid(column=1, row=2, sticky=E, padx=(0, 7))
    Button(popup, text='Merge', command=merge_this_custom_query, width=17).grid(column=1, row=3, sticky=E, padx=(0, 7))
    svb = Button(popup, text='Save', command=add_custom_query_to_json, width=17)
    svb.grid(column=1, row=4, sticky=E, padx=(0, 7))
    # saving to disk requires an open project
    if in_a_project.get() == 0:
        svb.config(state=DISABLED)
    else:
        svb.config(state=NORMAL)
    Button(popup, text='Remove', command=remove_this_custom_query, width=17).grid(column=1, row=5, sticky=E, padx=(0, 7))
    Button(popup, text='Delete', command=delete_this_custom_query, width=17).grid(column=1, row=6, sticky=E, padx=(0, 7))
    def have_unsaved_list():
        """finds out if there is an unsaved list"""
        global tb
        lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
        if any(lst == l for l in list(custom_special_dict.values())):
            return False
        else:
            return True
    def quit_listing(*args):
        # confirm before discarding an unsaved list, then close the popup
        if have_unsaved_list():
            should_continue = messagebox.askyesno("Unsaved data",
                      "Unsaved list will be forgotten. Continue?")
            if not should_continue:
                return
        popup.destroy()
    stopbut = Button(popup, text='Done', command=quit_listing)
    stopbut.grid(column=0, columnspan=2, row=7, pady=7)
############## ############## ############## ############## ##############
# COLSCHEMES # # COLSCHEMES # # COLSCHEMES # # COLSCHEMES # # COLSCHEMES #
############## ############## ############## ############## ##############
# a place for the toplevel entry info: one StringVar per colour key (0-9),
# holding the user-assigned theme name for that colour
entryboxes = OrderedDict()
# fill it with null data
for i in range(10):
    tmp = StringVar()
    tmp.set('')
    entryboxes[i] = tmp
def codingschemer():
    """Popup that lets the user name the 0-9 concordance colour keys.

    Each entry is backed by the corresponding entryboxes StringVar, so the
    names are read later by get_list_of_themes.
    """
    # tear down any previous instance of the popup
    try:
        global toplevel
        toplevel.destroy()
    except:
        pass
    from tkinter import Toplevel
    toplevel = Toplevel()
    toplevel.geometry('+1089+85')
    toplevel.title("Coding scheme")
    toplevel.wm_attributes('-topmost', 1)
    Label(toplevel, text='').grid(row=0, column=0, pady=2)
    def quit_coding(*args):
        toplevel.destroy()
    #Label(toplevel, text=('When concordancing, you can colour code lines using 0-9 keys. '\
    #                      'If you name the colours here, you can export or save the concordance lines with '\
    #                      'names attached.'), font=('Helvetica', 13, 'italic'), wraplength = 250, justify=LEFT).grid(row=0, column=0, columnspan=2)
    stopbut = Button(toplevel, text='Done', command=quit_coding)
    stopbut.grid(row=12, column=0, columnspan=2, pady=15)
    # one entry per colour key, painted in that colour
    for index, colour_index in enumerate(colourdict.keys()):
        Label(toplevel, text='Key: %d' % colour_index).grid(row=index + 1, column=0)
        fore = 'black'
        # white text on the darkest background for readability
        if colour_index == 9:
            fore = 'white'
        tmp = Entry(toplevel, textvariable=entryboxes[index], bg=colourdict[colour_index], fg = fore)
        all_text_widgets.append(tmp)
        if index == 0:
            tmp.focus_set()
        tmp.grid(row=index + 1, column=1, padx=10)
    toplevel.bind("<Return>", quit_coding)
    toplevel.bind("<Tab>", focus_next_window)
# conc box needs to be defined up here, before the callbacks that use it
fsize = IntVar()   # concordance font size, adjusted by cmd-minus/equal
fsize.set(12)
conc_height = 510 if small_screen else 565
cfrm = Frame(tab4, height=conc_height, width=note_width - 10)
cfrm.grid(column=0, row=0, sticky='nw')
cscrollbar = Scrollbar(cfrm)
cscrollbarx = Scrollbar(cfrm, orient=HORIZONTAL)
cscrollbar.pack(side=RIGHT, fill=Y)
cscrollbarx.pack(side=BOTTOM, fill=X)
# monospaced so padded concordance columns align
conclistbox = Listbox(cfrm, yscrollcommand=cscrollbar.set, relief=SUNKEN, bg='#F4F4F4',
                      xscrollcommand=cscrollbarx.set, height=conc_height,
                      width=note_width - 10, font=('Courier New', fsize.get()),
                      selectmode = EXTENDED)
conclistbox.pack(fill=BOTH)
cscrollbar.config(command=conclistbox.yview)
cscrollbarx.config(command=conclistbox.xview)
cfrm.pack_propagate(False)
def dec_concfont(*args):
    """Shrink the concordance font by one point and re-apply it."""
    fsize.set(fsize.get() - 1)
    conclistbox.configure(font=('Courier New', fsize.get()))
def inc_concfont(*args):
    """Grow the concordance font by one point and re-apply it."""
    fsize.set(fsize.get() + 1)
    conclistbox.configure(font=('Courier New', fsize.get()))
def select_all_conclines(*args):
    # Highlight every line in the concordance listbox (cmd-a binding).
    conclistbox.select_set(0, END)
def color_conc(colour=0, *args):
    """Paint the selected concordance lines with colour key *colour*,
    recording each painted row number in itemcoldict so the colour
    survives redraws."""
    import re
    line_number = re.compile(r'^([0-9]+)')
    bg = colourdict[colour]
    if type(current_conc[0]) == str:
        return
    for pos in conclistbox.curselection():
        conclistbox.itemconfig(pos, {'bg': bg})
        row_id = int(re.search(line_number, conclistbox.get(pos)).group(1))
        itemcoldict[row_id] = bg
    conclistbox.selection_clear(0, END)
# keyboard bindings for the concordance listbox (key = platform modifier)
conclistbox.bind("<BackSpace>", delete_conc_lines)
conclistbox.bind("<Shift-KeyPress-BackSpace>", delete_reverse_conc_lines)
conclistbox.bind("<Shift-KeyPress-Tab>", conc_sort)
conclistbox.bind("<%s-minus>" % key, dec_concfont)
conclistbox.bind("<%s-equal>" % key, inc_concfont)
conclistbox.bind("<%s-a>" % key, select_all_conclines)
conclistbox.bind("<%s-s>" % key, lambda x: concsave())
conclistbox.bind("<%s-e>" % key, lambda x: conc_export())
conclistbox.bind("<%s-t>" % key, lambda x: toggle_filenames())
conclistbox.bind("<%s-A>" % key, select_all_conclines)
conclistbox.bind("<%s-S>" % key, lambda x: concsave())
conclistbox.bind("<%s-E>" % key, lambda x: conc_export())
conclistbox.bind("<%s-T>" % key, lambda x: toggle_filenames())
# digit keys apply the matching colour code to the selection
conclistbox.bind("0", lambda x: color_conc(colour=0))
conclistbox.bind("1", lambda x: color_conc(colour=1))
conclistbox.bind("2", lambda x: color_conc(colour=2))
conclistbox.bind("3", lambda x: color_conc(colour=3))
conclistbox.bind("4", lambda x: color_conc(colour=4))
conclistbox.bind("5", lambda x: color_conc(colour=5))
conclistbox.bind("6", lambda x: color_conc(colour=6))
conclistbox.bind("7", lambda x: color_conc(colour=7))
conclistbox.bind("8", lambda x: color_conc(colour=8))
conclistbox.bind("9", lambda x: color_conc(colour=9))
# NOTE(review): duplicate of the "0" binding above -- harmless but redundant
conclistbox.bind("0", lambda x: color_conc(colour=0))
# these were 'generate' and 'edit', but they look ugly right now. the spaces are nice though.
#lab = StringVar()
#lab.set('Concordancing: %s' % os.path.basename(corpus_fullpath.get()))
#Label(tab4, textvariable=lab, font=("Helvetica", 13, "bold")).grid(row=1, column=0, padx=20, pady=10, columnspan=5, sticky=W)
#Label(tab4, text=' ', font=("Helvetica", 13, "bold")).grid(row=1, column=9, columnspan=2)
conc_right_button_frame = Frame(tab4)
conc_right_button_frame.grid(row=1, column=0, padx=(10,0), sticky='N', pady=(5, 0))
# edit conc lines
conc_left_buts = Frame(conc_right_button_frame)
conc_left_buts.grid(row=1, column=0, columnspan=6, sticky='W')
Button(conc_left_buts, text='Delete selected', command=lambda: delete_conc_lines(), ).grid(row=0, column=0, sticky=W)
Button(conc_left_buts, text='Just selected', command=lambda: delete_reverse_conc_lines(), ).grid(row=0, column=1)
#Button(conc_left_buts, text='Sort', command=lambda: conc_sort()).grid(row=0, column=4)
def toggle_filenames(*args):
    """Redraw the concordance so the column show/hide toggles take effect."""
    if isinstance(current_conc[0], str):
        return
    add_conc_lines_to_window(current_conc[0])
def make_df_matching_screen():
    """Return the stored concordance filtered and ordered to match the listbox.

    Parses the row number prefixed to each visible line and reindexes the
    stored DataFrame to exactly that subset, in on-screen order. Returns
    None when no concordance is loaded.
    """
    import re
    if type(current_conc[0]) == str:
        return
    df = current_conc[0]
    if show_filenames.get() == 0:
        df = df.drop('f', axis=1, errors = 'ignore')
    if show_themes.get() == 0:
        df = df.drop('t', axis=1, errors = 'ignore')
    ix_to_keep = []
    lines = conclistbox.get(0, END)
    reg = re.compile(r'^\s*([0-9]+)')
    for l in lines:
        s = re.search(reg, l)
        ix_to_keep.append(int(s.group(1)))
    # BUG FIX: reindex() alone selects and orders the rows. The previous
    # `df = df.ix[ix_to_keep]` line was redundant, and .ix has been removed
    # from pandas entirely, so it raised AttributeError on modern versions.
    df = df.reindex(ix_to_keep)
    return df
def concsave():
    """Store the on-screen concordance lines under a user-chosen name."""
    global conc_saved
    name = simpledialog.askstring('Concordance name', 'Choose a name for your concordance lines:')
    if not name or name == '':
        return
    all_conc[name] = make_df_matching_screen()
    conc_saved = True
    refresh()
def merge_conclines():
    """Concatenate selected stored concordances (and possibly the current
    lines) into one set, optionally dropping duplicates, then display it."""
    global conc_saved
    toget = prev_conc_listbox.curselection()
    should_continue = True
    # warn before clobbering unsaved on-screen lines
    if not conc_saved:
        if type(current_conc[0]) != str and len(toget) > 1:
            should_continue = messagebox.askyesno("Unsaved data",
                      "Unsaved concordance lines will be forgotten. Continue?")
        else:
            should_continue = True
    if not should_continue:
        return
    import pandas
    dfs = []
    if toget != ():
        if len(toget) < 2:
            # single selection: merge it with the current on-screen lines
            for item in toget:
                nm = prev_conc_listbox.get(item)
                dfs.append(all_conc[nm])
            dfs.append(current_conc[0])
            #timestring('Need multiple concordances to merge.' % name)
            #return
        # NOTE(review): with a single selection this loop appends the same
        # stored set a second time; harmless when duplicates are dropped,
        # but confirm intent.
        for item in toget:
            nm = prev_conc_listbox.get(item)
            dfs.append(all_conc[nm])
    else:
        # BUG FIX: this previously did "'Nothing selected to merge.' % name",
        # which raised TypeError (no placeholder) and referenced the
        # undefined name `name`.
        timestring('Nothing selected to merge.')
        return
    df = pandas.concat(dfs, ignore_index = True)
    should_drop = messagebox.askyesno("Remove duplicates",
                  "Remove duplicate concordance lines?")
    if should_drop:
        df = df.drop_duplicates(subset = ['l', 'm', 'r'])
    add_conc_lines_to_window(df)
def load_saved_conc():
    """Load one stored concordance into the window, confirming first if the
    current lines are unsaved."""
    global conc_saved
    should_continue = True
    if not conc_saved:
        if type(current_conc[0]) != str:
            should_continue = messagebox.askyesno("Unsaved data",
                      "Unsaved concordance lines will be forgotten. Continue?")
        else:
            should_continue = True
    if should_continue:
        toget = prev_conc_listbox.curselection()
        if len(toget) > 1:
            # BUG FIX: this previously did "'...' % name", which raised
            # TypeError (no placeholder) and referenced the undefined `name`.
            timestring('Only one selection allowed for load.')
            return
        if toget != ():
            nm = prev_conc_listbox.get(toget[0])
            df = all_conc[nm]
            # fresh load: don't carry over colours from the previous set
            add_conc_lines_to_window(df, loading=True, preserve_colour=False)
        else:
            return
# store/remove/merge/load buttons for saved concordances
fourbuts = Frame(conc_right_button_frame)
fourbuts.grid(row=1, column=6, columnspan=1, sticky='E')
Button(fourbuts, text='Store as', command=concsave).grid(row=0, column=0)
Button(fourbuts, text='Remove', command= lambda: remove_one_or_more(window='conc', kind='concordance')).grid(row=0, column=1)
Button(fourbuts, text='Merge', command=merge_conclines).grid(row=0, column=2)
Button(fourbuts, text='Load', command=load_saved_conc).grid(row=0, column=3)
# column show/hide toggles; each traces toggle_filenames so the view redraws
showbuts = Frame(conc_right_button_frame)
showbuts.grid(row=0, column=0, columnspan=6, sticky='w')
show_filenames = IntVar()
fnbut = Checkbutton(showbuts, text='Filenames', variable=show_filenames, command=toggle_filenames)
fnbut.grid(row=0, column=4)
#fnbut.select()
show_filenames.trace('w', toggle_filenames)
show_subcorpora = IntVar()
sbcrp = Checkbutton(showbuts, text='Subcorpora', variable=show_subcorpora, command=toggle_filenames)
sbcrp.grid(row=0, column=3)
sbcrp.select()
show_subcorpora.trace('w', toggle_filenames)
show_themes = IntVar()
themebut = Checkbutton(showbuts, text='Scheme', variable=show_themes, command=toggle_filenames)
themebut.grid(row=0, column=1)
#themebut.select()
show_themes.trace('w', toggle_filenames)
show_speaker = IntVar()
showspkbut = Checkbutton(showbuts, text='Speakers', variable=show_speaker, command=toggle_filenames)
showspkbut.grid(row=0, column=5)
#showspkbut.select()
show_speaker.trace('w', toggle_filenames)
show_index = IntVar()
show_s_w_ix = Checkbutton(showbuts, text='Index', variable=show_index, command=toggle_filenames)
#show_s_w_ix.select()
show_s_w_ix.grid(row=0, column=2)
show_index.trace('w', toggle_filenames)
show_df_index = IntVar()
indbut = Checkbutton(showbuts, text='#', variable=show_df_index, command=toggle_filenames)
indbut.grid(row=0, column=0)
indbut.select()
# disabling because turning index off can cause problems when sorting, etc
indbut.config(state=DISABLED)
show_df_index.trace('w', toggle_filenames)
interrobut_conc = Button(showbuts, text='Re-run')
interrobut_conc.config(command=lambda: runner(interrobut_conc, do_interrogation, conc = True), state=DISABLED)
interrobut_conc.grid(row=0, column=6, padx=(5,0))
# shared state for the annotation popup defined below
annotation = False
txt_var = StringVar()
txt_var_r = StringVar()
def annotate_corpus():
    """
    Allow the user to annotate the corpus.

    Opens a popup where the user picks an annotation type (Index, Position,
    Speaker, Colour, Scheme, Middle, Custom or Tag), then writes the
    annotation into the corpus via Corpus.annotate using the current
    concordance lines as the data to annotate.
    """
    # maps menu labels to the short field codes used by Corpus.annotate
    anno_trans = {'Middle': 'm',
                  'Scheme': 't',
                  'Index': 'index',
                  'Colour': 'q'}
    def allow_text(*args):
        """
        If the user wants to add text as value ('Custom'), enable the
        free-text entry; otherwise keep it disabled.
        """
        if anno_dec.get() == 'Custom':
            txt_box_r.config(state=NORMAL)
        else:
            txt_box_r.config(state=DISABLED)
    def go_action(*args):
        """
        Do annotation: build the annotation spec from the popup widgets,
        apply it to the corpus, then close the popup.
        """
        from corpkit.corpus import Corpus
        corp = Corpus(current_corpus.get(), print_info=False)
        data = current_conc[0]
        chosen = anno_dec.get()
        # add colour and scheme to df
        if chosen == 'Scheme':
            themelist = get_list_of_themes(data)
            if any(t != '' for t in themelist):
                data.insert(0, 't', themelist)
        elif chosen == 'Colour':
            colourlist = get_list_of_colours(data)
            if any(t != '' for t in colourlist):
                data.insert(0, 'q', colourlist)
        # 'Tag' annotates with a bare string; everything else uses a
        # {field: value} mapping
        if chosen == 'Tag':
            annotation = txt_box.get()
        elif chosen == 'Custom':
            field = txt_box.get()
            value = txt_box_r.get()
            annotation = {field: value}
        else:
            field = txt_box.get()
            value = anno_trans.get(chosen, chosen)
            annotation = {field: value}
        if debug:
            print('Annotation:', annotation)
        corp.annotate(data, annotation, dry_run=False)
        timestring('Annotation done.')
        refresh_by_metadata()
        anno_pop.destroy()
    # --- build the popup ---
    from tkinter import Toplevel
    anno_pop = Toplevel()
    #anno_pop.geometry('+400+40')
    anno_pop.title("Annotate corpus")
    anno_pop.wm_attributes('-topmost', 1)
    #Label(anno_pop, text='Annotate with:').grid(row=1, column=0, sticky=W)
    anno_dec = StringVar()
    anno_dec.set('Middle')
    annotype = ('Index', 'Position', 'Speaker', 'Colour', 'Scheme', 'Middle', 'Custom', 'Tag')
    anno_lb = OptionMenu(anno_pop, anno_dec, *annotype)
    anno_lb.grid(row=2, column=1, sticky=E)
    Label(anno_pop, text='Field:').grid(row=1, column=0)
    Label(anno_pop, text='Value:').grid(row=1, column=1)
    txt_box = Entry(anno_pop, textvariable=txt_var, width=10)
    all_text_widgets.append(txt_box)
    txt_box.grid(row=2, column=0)
    txt_box_r = Entry(anno_pop, textvariable=txt_var_r, width=22)
    txt_box_r.config(state=DISABLED)
    all_text_widgets.append(txt_box_r)
    txt_box_r.grid(row=3, columnspan=2)
    anno_dec.trace("w", allow_text)
    do_anno = Button(anno_pop, text='Annotate', command=go_action)
    do_anno.grid(row=4, columnspan=2)
def recalc(*args):
    """Turn the current concordance into a new interrogation.

    Asks the user for a name, calls .calculate() on the current
    concordance, stores the result in all_interrogations, refreshes the
    spreadsheets and navigation buttons, and switches to the results tab.
    """
    import pandas as pd
    name = simpledialog.askstring('New name', 'Choose a name for the data:')
    if not name:
        return
    else:
        out = current_conc[0].calculate()
        all_interrogations[name] = out
        name_of_interro_spreadsheet.set(name)
        i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
        totals_as_df = pd.DataFrame(out.totals, dtype=object)
        # results may be absent (totals-only interrogations)
        if out.results is not None:
            update_spreadsheet(interro_results, out.results, height=340)
            subs = out.results.index
        else:
            update_spreadsheet(interro_results, df_to_show=None, height=340)
            subs = out.totals.index
        update_spreadsheet(interro_totals, totals_as_df, height=10)
        # enable/disable prev/next buttons depending on position in history
        ind = list(all_interrogations.keys()).index(name_of_interro_spreadsheet.get())
        if ind == 0:
            prev.configure(state=DISABLED)
        else:
            prev.configure(state=NORMAL)
        if ind + 1 == len(list(all_interrogations.keys())):
            nex.configure(state=DISABLED)
        else:
            nex.configure(state=NORMAL)
        refresh()
        # repopulate the subcorpus listbox from the new result's index
        subc_listbox.delete(0, 'end')
        for e in list(subs):
            if e != 'tkintertable-order':
                subc_listbox.insert(END, e)
        timestring('Calculation done. "%s" created.' % name)
        note.change_tab(1)
# 'Calculate' turns the concordance into an interrogation; enabled elsewhere
recalc_but = Button(showbuts, text='Calculate', command=recalc)
recalc_but.config(command=recalc, state=DISABLED)
recalc_but.grid(row=0, column=7, padx=(5,0))
# concordance window size selector; changing it triggers conc_sort
win = StringVar()
win.set('Window')
wind_size = OptionMenu(conc_left_buts, win, *tuple(('Window', '20', '30', '40',
                                                    '50', '60', '70', '80', '90', '100')))
wind_size.config(width=10)
wind_size.grid(row=0, column=5)
win.trace("w", conc_sort)
# possible sort
sort_vals = ('Index', 'Subcorpus', 'File', 'Speaker', 'Colour',
             'Scheme', 'Random', 'L5', 'L4', 'L3', 'L2', 'L1',
             'M1', 'M2', 'M-2', 'M-1', 'R1', 'R2', 'R3', 'R4', 'R5')
sortval = StringVar()
sortval.set('Sort')
prev_sortval = ['None']
srtkind = OptionMenu(conc_left_buts, sortval, *sort_vals)
srtkind.config(width=10)
srtkind.grid(row=0, column=3)
sortval.trace("w", conc_sort)
# export to csv
Button(conc_left_buts, text='Export', command=lambda: conc_export()).grid(row=0, column=6)
# annotate
Button(conc_left_buts, text='Annotate', command=annotate_corpus).grid(row=0, column=7)
# --- stored-concordance listbox on the right ---
store_label = Label(conc_right_button_frame, text='Stored concordances', font=("Helvetica", 13, "bold"))
prev_conc = Frame(conc_right_button_frame)
prev_conc.grid(row=0, column=7, rowspan=3, columnspan=2,
               sticky=E, padx=(10,0), pady=(4,0))
prevcbar = Scrollbar(prev_conc)
prevcbar.pack(side=RIGHT, fill=Y)
prev_conc_lb_size = 20
prev_conc_listbox = Listbox(prev_conc, selectmode=EXTENDED, width=prev_conc_lb_size,
                            height=4, relief=SUNKEN, bg='#F4F4F4',
                            yscrollcommand=prevcbar.set, exportselection=False)
prev_conc_listbox.pack()
# NOTE(review): this wires cscrollbar, not prevcbar, to the listbox --
# looks like it should probably be prevcbar.config(...); confirm intent
cscrollbar.config(command=prev_conc_listbox.yview)
root.update()
# this laziness is dynamic calculation of how far apart the left and right
# button sets should be in the conc pane. i don't want to go reframing
# everything, so instead, we figure out the best distance by math
# width of window - width of left buttons - with of prev conc and 'stored concordances' label (approx)
padd = note_width - showbuts.winfo_width() - (prev_conc.winfo_width() * 2)
# for now, just a guess!
if padd < 0:
    padd = 250
store_label.grid(row=0, column=6, sticky=E, padx=(padd,0))
############## ############## ############## ############## ##############
# MANAGE TAB # # MANAGE TAB # # MANAGE TAB # # MANAGE 'TAB' # # MANAGE TAB #
############## ############## ############## ############## ##############
def make_new_project():
    """Create a new corpkit project on disk and switch the GUI to it.

    Prompts for a project name and a parent directory, creates the project
    skeleton via corpkit.other.new_project, then points all project path
    variables at the new project and saves the configuration.
    """
    import os
    from corpkit.other import new_project
    reset_everything()
    name = simpledialog.askstring('New project', 'Choose a name for your project:')
    if not name:
        return
    home = os.path.expanduser("~")
    docpath = os.path.join(home, 'Documents')
    # macOS file dialogs accept an extra 'message' kwarg
    if sys.platform == 'darwin':
        the_kwargs = {'message': 'Choose a directory in which to create your new project'}
    else:
        the_kwargs = {}
    fp = filedialog.askdirectory(title = 'New project location',
                                 initialdir = docpath,
                                 **the_kwargs)
    if not fp:
        return
    new_proj_basepath.set('New project: "%s"' % name)
    new_project(name = name, loc = fp, root=root)
    # point every project-relative path at the new project directory
    project_fullpath.set(os.path.join(fp, name))
    os.chdir(project_fullpath.get())
    image_fullpath.set(os.path.join(project_fullpath.get(), 'images'))
    savedinterro_fullpath.set(os.path.join(project_fullpath.get(), 'saved_interrogations'))
    conc_fullpath.set(os.path.join(project_fullpath.get(), 'saved_concordances'))
    corpora_fullpath.set(os.path.join(project_fullpath.get(), 'data'))
    exported_fullpath.set(os.path.join(project_fullpath.get(), 'exported'))
    log_fullpath.set(os.path.join(project_fullpath.get(), 'logs'))
    addbut.config(state=NORMAL)
    open_proj_basepath.set('Loaded: "%s"' % name)
    save_config()
    root.title("corpkit: %s" % os.path.basename(project_fullpath.get()))
    #load_project(path = os.path.join(fp, name))
    timestring('Project "%s" created.' % name)
    # send the user to the build tab, since a new project has no corpora yet
    note.focus_on(tab0)
    update_available_corpora()
def get_saved_results(kind='interrogation', add_to=False):
    """Load saved data of one kind from the project directories into memory.

    kind='interrogation' and kind='concordance' unpickle saved results into
    all_interrogations / all_conc; kind='image' just collects .png names
    into all_images. Refreshes the GUI afterwards.
    """
    from corpkit.other import load_all_results
    if kind == 'interrogation':
        datad = savedinterro_fullpath.get()
    elif kind == 'concordance':
        datad = conc_fullpath.get()
    elif kind == 'image':
        datad = image_fullpath.get()
    if datad == '':
        timestring('No project loaded.')
    if kind == 'image':
        image_list = sorted([f for f in os.listdir(image_fullpath.get()) if f.endswith('.png')])
        for iname in image_list:
            if iname.replace('.png', '') not in all_images:
                all_images.append(iname.replace('.png', ''))
        if len(image_list) > 0:
            nbut.config(state=NORMAL)
    else:
        # interrogations and concordances load identically; the original
        # code duplicated this call in two byte-identical branches
        r = load_all_results(data_dir=datad, root=root, note=note)
        if r is not None:
            for name, loaded in list(r.items()):
                if kind == 'interrogation':
                    # a dict of interrogations is flattened with 'name-subname' keys
                    if isinstance(loaded, dict):
                        for subname, subloaded in list(loaded.items()):
                            all_interrogations[name + '-' + subname] = subloaded
                    else:
                        all_interrogations[name] = loaded
                else:
                    all_conc[name] = loaded
    if len(list(all_interrogations.keys())) > 0:
        nex.configure(state=NORMAL)
    refresh()
def recentchange(*args):
    """Open a project as soon as the user picks one from the recents menu."""
    chosen = recent_project.get()
    if not chosen:
        return
    project_fullpath.set(chosen)
    load_project(path=project_fullpath.get())
def projchange(*args):
    """When the active project changes, track it in recents and save prefs."""
    path = project_fullpath.get()
    # ignore the placeholder path inside a frozen .app bundle
    if not path or 'Contents/MacOS' in path:
        in_a_project.set(0)
        return
    in_a_project.set(1)
    if path not in most_recent_projects:
        most_recent_projects.append(path)
    save_tool_prefs(printout=False)
    #update_available_corpora()
# corpus path setter
savedinterro_fullpath = StringVar()
savedinterro_fullpath.set('')
data_basepath = StringVar()
data_basepath.set('Select data directory')
in_a_project = IntVar()
in_a_project.set(0)
project_fullpath = StringVar()
project_fullpath.set(rd)
project_fullpath.trace("w", projchange)
recent_project = StringVar()
recent_project.set('')
recent_project.trace("w", recentchange)
conc_fullpath = StringVar()
conc_fullpath.set('')
exported_fullpath = StringVar()
exported_fullpath.set('')
log_fullpath = StringVar()
import os
home = os.path.expanduser("~")
try:
os.makedirs(os.path.join(home, 'corpkit-logs'))
except:
pass
log_fullpath.set(os.path.join(home, 'corpkit-logs'))
image_fullpath = StringVar()
image_fullpath.set('')
image_basepath = StringVar()
image_basepath.set('Select image directory')
corpora_fullpath = StringVar()
corpora_fullpath.set('')
def imagedir_modified(*args):
    """Keep matplotlib's save dialog pointed at the project's image dir."""
    import matplotlib
    matplotlib.rcParams['savefig.directory'] = image_fullpath.get()
image_fullpath.trace("w", imagedir_modified)
def data_getdir():
    """Let the user pick the saved-data directory and record the choice."""
    import os
    chosen = filedialog.askdirectory(title = 'Open data directory')
    if not chosen:
        return
    savedinterro_fullpath.set(chosen)
    data_basepath.set('Saved data: "%s"' % os.path.basename(chosen))
    timestring('Set data directory: %s' % os.path.basename(chosen))
def image_getdir(nodialog = False):
    """Let the user pick the image directory and record the choice.

    The nodialog parameter is currently unused; kept for interface
    compatibility with callers.
    """
    import os
    chosen = filedialog.askdirectory()
    if not chosen:
        return
    image_fullpath.set(chosen)
    image_basepath.set('Images: "%s"' % os.path.basename(chosen))
    timestring('Set image directory: %s' % os.path.basename(chosen))
def save_one_or_more(kind = 'interrogation'):
    """Pickle the selected items to the project's save directory.

    kind: 'interrogation' (saved to saved_interrogations) or anything else
    (treated as concordance, saved to saved_concordances). Items whose
    urlified filename already exists on disk are skipped and reported.
    """
    sel_vals = manage_listbox_vals
    if len(sel_vals) == 0:
        timestring('Nothing selected to save.')
        return
    from corpkit.other import save
    import os
    saved = 0
    existing = 0
    # for each filename selected
    for i in sel_vals:
        safename = urlify(i) + '.p'
        # make sure not already there
        if safename not in os.listdir(savedinterro_fullpath.get()):
            if kind == 'interrogation':
                savedata = all_interrogations[i]
                # strip unpicklable GUI handles before saving
                savedata.query.pop('root', None)
                savedata.query.pop('note', None)
                save(savedata, safename, savedir = savedinterro_fullpath.get())
            else:
                savedata = all_conc[i]
                # concordances may have no query attribute; best-effort strip
                try:
                    savedata.query.pop('root', None)
                    savedata.query.pop('note', None)
                except:
                    pass
                save(savedata, safename, savedir = conc_fullpath.get())
            saved += 1
        else:
            existing += 1
            timestring('%s already exists in %s.' % (urlify(i), os.path.basename(savedinterro_fullpath.get())))
    # report what happened
    if saved == 1 and existing == 0:
        timestring('%s saved.' % sel_vals[0])
    else:
        if existing == 0:
            timestring('%d %ss saved.' % (len(sel_vals), kind))
        else:
            timestring('%d %ss saved, %d already existed' % (saved, kind, existing))
    refresh()
    manage_callback()
def remove_one_or_more(window=False, kind ='interrogation'):
    """Remove selected items from memory (not from disk).

    When called from the concordance pane (window is not False), the
    selection is taken from prev_conc_listbox instead of the manage popup.
    """
    sel_vals = manage_listbox_vals
    if window is not False:
        toget = prev_conc_listbox.curselection()
        # NOTE(review): toget is a tuple of indices; Listbox.get expects an
        # index (or first/last pair) -- possibly should be get(toget[0]); confirm
        sel_vals = [prev_conc_listbox.get(toget)]
    if len(sel_vals) == 0:
        timestring('No interrogations selected.')
        return
    for i in sel_vals:
        # best-effort: ignore items already gone from the dict
        try:
            if kind == 'interrogation':
                del all_interrogations[i]
            else:
                del all_conc[i]
        except:
            pass
    if len(sel_vals) == 1:
        timestring('%s removed.' % sel_vals[0])
    else:
        timestring('%d interrogations removed.' % len(sel_vals))
    if kind == 'image':
        refresh_images()
    refresh()
    manage_callback()
def del_one_or_more(kind = 'interrogation'):
    """Permanently delete the selected saved items and their files on disk.

    kind: 'interrogation', 'image', or anything else (treated as
    concordance). Asks for confirmation before deleting.
    """
    sel_vals = manage_listbox_vals
    ext = '.p'
    if kind == 'interrogation':
        p = savedinterro_fullpath.get()
    elif kind == 'image':
        p = image_fullpath.get()
        ext = '.png'
    else:
        p = conc_fullpath.get()
    if len(sel_vals) == 0:
        timestring('No interrogations selected.')
        return
    import os
    result = messagebox.askquestion("Are You Sure?", "Permanently delete the following files:\n\n    %s" % '\n    '.join(sel_vals), icon='warning')
    if result == 'yes':
        for i in sel_vals:
            # drop from memory, then remove the file
            if kind == 'interrogation':
                del all_interrogations[i]
                os.remove(os.path.join(p, i + ext))
            elif kind == 'concordance':
                del all_conc[i]
                os.remove(os.path.join(p, i + ext))
            else:
                all_images.remove(i)
                os.remove(os.path.join(p, i + ext))
    if len(sel_vals) == 1:
        timestring('%s deleted.' % sel_vals[0])
    else:
        # bug fix: arguments were swapped ('%d %ss' % (kind, len(...))),
        # which raised TypeError when more than one item was deleted
        timestring('%d %ss deleted.' % (len(sel_vals), kind))
    refresh()
    manage_callback()
def urlify(s):
    """Turn a title into a filesystem-safe filename.

    Strips characters that are not word chars, whitespace or hyphens,
    collapses whitespace runs into single hyphens, and removes stray
    LaTeX-style markers (textbf/emph/textsc/textit) after a hyphen.
    """
    import re
    cleaned = re.sub(r"[^\w\s-]", '', s)
    hyphenated = re.sub(r"\s+", '-', cleaned)
    return re.sub(r"-(textbf|emph|textsc|textit)", '-', hyphenated)
def rename_one_or_more(kind = 'interrogation'):
    """Rename selected saved items in memory and (if present) on disk.

    Prompts for a new name per item; updates the in-memory stores and any
    spreadsheet labels that referenced the old name.
    """
    ext = '.p'
    sel_vals = manage_listbox_vals
    if kind == 'interrogation':
        p = savedinterro_fullpath.get()
    elif kind == 'image':
        p = image_fullpath.get()
        ext = '.png'
    else:
        p = conc_fullpath.get()
    if len(sel_vals) == 0:
        timestring('No items selected.')
        return
    import os
    # renames are always applied to the files on disk as well
    permanently = True
    if permanently:
        perm_text='permanently '
    else:
        perm_text=''
    for i in sel_vals:
        answer = simpledialog.askstring('Rename', 'Choose a new name for "%s":' % i, initialvalue = i)
        if answer is None or answer == '':
            return
        else:
            if kind == 'interrogation':
                all_interrogations[answer] = all_interrogations.pop(i)
            elif kind == 'image':
                # preserve position in the image list
                ind = all_images.index(i)
                all_images.remove(i)
                all_images.insert(ind, answer)
            else:
                all_conc[answer] = all_conc.pop(i)
        if permanently:
            oldf = os.path.join(p, i + ext)
            if os.path.isfile(oldf):
                newf = os.path.join(p, urlify(answer) + ext)
                os.rename(oldf, newf)
        # update any spreadsheet titles that pointed at the old name
        if kind == 'interrogation':
            if name_of_interro_spreadsheet.get() == i:
                name_of_interro_spreadsheet.set(answer)
                i_resultname.set('Interrogation results: %s' % str(answer))
                #update_spreadsheet(interro_results, all_interrogations[answer].results)
            if name_of_o_ed_spread.get() == i:
                name_of_o_ed_spread.set(answer)
                #update_spreadsheet(o_editor_results, all_interrogations[answer].results)
            if name_of_n_ed_spread.get() == i:
                name_of_n_ed_spread.set(answer)
                #update_spreadsheet(n_editor_results, all_interrogations[answer].results)
    if kind == 'image':
        refresh_images()
    if len(sel_vals) == 1:
        timestring('%s %srenamed as %s.' % (sel_vals[0], perm_text, answer))
    else:
        timestring('%d items %srenamed.' % (len(sel_vals), perm_text))
    refresh()
    manage_callback()
def export_interrogation(kind = 'interrogation'):
    """Save dataframes and options of the selected items to CSV files.

    Interrogations get a directory under the project's 'exported' folder
    containing results.csv / totals.csv / query.csv; other kinds are
    delegated to conc_export(). (Fix: the docstring previously sat after
    the first statement, so it was never a real docstring.)
    """
    sel_vals = manage_listbox_vals
    import os
    import pandas
    fp = False
    for i in sel_vals:
        answer = simpledialog.askstring('Export data', 'Choose a save name for "%s":' % i, initialvalue = i)
        if answer is None or answer == '':
            return
        if kind != 'interrogation':
            conc_export(data = i)
        else:
            data = all_interrogations[i]
            keys = list(data.__dict__.keys())
            # outside a project, ask the user where to save
            if in_a_project.get() == 0:
                if sys.platform == 'darwin':
                    the_kwargs = {'message': 'Choose save directory for exported interrogation'}
                else:
                    the_kwargs = {}
                fp = filedialog.askdirectory(title = 'Choose save directory', **the_kwargs)
                if fp == '':
                    return
            else:
                fp = project_fullpath.get()
            os.makedirs(os.path.join(exported_fullpath.get(), answer))
            for k in keys:
                if k == 'results':
                    if data.results is not None:
                        # tkintertable-order is a GUI artefact, not real data
                        tkdrop = data.results.drop('tkintertable-order', errors = 'ignore')
                        tkdrop.to_csv(os.path.join(exported_fullpath.get(), answer, 'results.csv'), sep ='\t', encoding = 'utf-8')
                if k == 'totals':
                    if data.totals is not None:
                        tkdrop = data.totals.drop('tkintertable-order', errors = 'ignore')
                        tkdrop.to_csv(os.path.join(exported_fullpath.get(), answer, 'totals.csv'), sep ='\t', encoding = 'utf-8')
                if k == 'query':
                    if getattr(data, 'query', None):
                        pandas.DataFrame(list(data.query.values()), index = list(data.query.keys())).to_csv(os.path.join(exported_fullpath.get(), answer, 'query.csv'), sep ='\t', encoding = 'utf-8')
    if fp:
        timestring('Results exported to %s' % (os.path.join(os.path.basename(exported_fullpath.get()), answer)))
def reset_everything():
    """Return the GUI to its initial, no-project state.

    Clears result labels, path variables, spreadsheets, listboxes, and the
    in-memory stores of interrogations, concordances and images.
    """
    # result names
    i_resultname.set('Interrogation results:')
    resultname.set('Results to edit:')
    editoname.set('Edited results:')
    savedplot.set('View saved images: ')
    open_proj_basepath.set('Open project')
    corpus_fullpath.set('')
    current_corpus.set('')
    corpora_fullpath.set('')
    project_fullpath.set(rd)
    #special_queries.set('Off')
    # spreadsheets
    update_spreadsheet(interro_results, df_to_show=None, height=340)
    update_spreadsheet(interro_totals, df_to_show=None, height=10)
    update_spreadsheet(o_editor_results, df_to_show=None, height=140)
    update_spreadsheet(o_editor_totals, df_to_show=None, height=10)
    update_spreadsheet(n_editor_results, df_to_show=None, height=140)
    update_spreadsheet(n_editor_totals, df_to_show=None, height=10)
    # in-memory stores: clear in place so other references stay valid
    all_interrogations.clear()
    all_conc.clear()
    # bug fix: the old code did `for e in all_images: all_images.remove(e)`,
    # which mutates the list while iterating and skips every other element
    del all_images[:]
    # subcorpora listbox
    subc_listbox.delete(0, END)
    subc_listbox_build.delete(0, END)
    # concordance
    conclistbox.delete(0, END)
    prev_conc_listbox.delete(0, END)
    every_interrogation['menu'].delete(0, 'end')
    # speaker listboxes
    speaker_listbox.delete(0, 'end')
    #update_available_corpora(delete = True)
    refresh()
def convert_speakdict_to_string(dictionary):
    """Serialise a {corpus: [speakers]} dict for configparser storage.

    Entries become 'corpus:spk1,spk2' joined with ';'. The separator
    characters (',', ':', ';') are stripped from speaker names so the
    string can be parsed back unambiguously. Returns 'none' when there is
    nothing to store.
    """
    if not dictionary:
        return 'none'
    entries = []
    for corpus, speakers in dictionary.items():
        sanitised = [spk.replace(',', '').replace(':', '').replace(';', '') for spk in speakers]
        entries.append('%s:%s' % (corpus, ','.join(sanitised)))
    return ';'.join(entries) if entries else 'none'
def parse_speakdict(string):
    """Turn configparser's speaker info back into a dict.

    Inverse of convert_speakdict_to_string: 'a:x,y;b:z' becomes
    {'a': ['x', 'y'], 'b': ['z']}. Returns {} for 'none' or empty input.
    Entries without exactly one colon are skipped.
    """
    # bug fix: was `string is 'none'` -- identity comparison with a literal,
    # which is implementation-dependent; use equality
    if string == 'none' or not string:
        return {}
    redict = {}
    corps = string.split(';')
    for c in corps:
        try:
            name, vals = c.split(':')
        except ValueError:
            continue
        vs = vals.split(',')
        redict[name] = vs
    return redict
def load_custom_list_json():
    """Load custom wordlists from the project's custom_wordlists.txt (JSON).

    Merges entries into custom_special_dict and saved_special_dict without
    overwriting keys that already exist.
    """
    import json
    f = os.path.join(project_fullpath.get(), 'custom_wordlists.txt')
    if os.path.isfile(f):
        data = json.loads(open(f).read())
        for k, v in data.items():
            if k not in list(custom_special_dict.keys()):
                custom_special_dict[k] = v
            if k not in list(saved_special_dict.keys()):
                saved_special_dict[k] = v
def dump_custom_list_json():
    """Write saved custom wordlists to the project as JSON."""
    import json
    outpath = os.path.join(project_fullpath.get(), 'custom_wordlists.txt')
    with open(outpath, 'w') as outfile:
        outfile.write(json.dumps(saved_special_dict))
def load_config():
    """Use configparser to restore project settings from settings.ini.

    Reads visualisation, interrogation and concordance options back into
    the corresponding Tk variables, restores speaker metadata and coding
    scheme entry boxes, and defaults the chart type to 'bar' when the
    corpus has no subcorpora.
    """
    import os
    try:
        import configparser
    except ImportError:
        import ConfigParser as configparser
    Config = configparser.ConfigParser()
    f = os.path.join(project_fullpath.get(), 'settings.ini')
    Config.read(f)
    # errors here
    plot_style.set(conmap(Config, "Visualise")['plot style'])
    texuse.set(conmap(Config, "Visualise")['use tex'])
    x_axis_l.set(conmap(Config, "Visualise")['x axis title'])
    chart_cols.set(conmap(Config, "Visualise")['colour scheme'])
    rel_corpuspath = conmap(Config, "Interrogate")['corpus path']
    try:
        files_as_subcorpora.set(conmap(Config, "Interrogate")['treat files as subcorpora'])
    except KeyError:
        files_as_subcorpora.set(False)
    if rel_corpuspath:
        # bug fix: was `current_corpus.get(relcorpuspath)` -- a NameError
        # (undefined name) and .get() cannot store a value anyway
        current_corpus.set(rel_corpuspath)
    #corpus_fullpath.set(corpa)
    spk = conmap(Config, "Interrogate")['speakers']
    corpora_speakers = parse_speakdict(spk)
    for i, v in list(corpora_speakers.items()):
        corpus_names_and_speakers[i] = v
    fsize.set(conmap(Config, "Concordance")['font size'])
    # window setting causes conc_sort to run, causing problems.
    #win.set(conmap(Config, "Concordance")['window'])
    #kind_of_dep.set(conmap(Config, 'Interrogate')['dependency type'])
    #conc_kind_of_dep.set(conmap(Config, "Concordance")['dependency type'])
    cods = conmap(Config, "Concordance")['coding scheme']
    if cods is None:
        for _, val in list(entryboxes.items()):
            val.set('')
    else:
        codsep = cods.split(',')
        for (box, val), cod in zip(list(entryboxes.items()), codsep):
            val.set(cod)
    if corpus_fullpath.get():
        subdrs = [d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))]
    else:
        subdrs = []
    if len(subdrs) == 0:
        charttype.set('bar')
    refresh()
def load_project(path=False):
    """Open an existing corpkit project and restore its state in the GUI.

    path: project directory, or False to prompt with a directory dialog.
    Validates the directory, repoints all project path variables, loads
    settings.ini and saved results, and auto-selects the first parsed
    corpus if none is active.
    """
    import os
    if path is False:
        # macOS file dialogs accept an extra 'message' kwarg
        if sys.platform == 'darwin':
            the_kwargs = {'message': 'Choose project directory'}
        else:
            the_kwargs = {}
        fp = filedialog.askdirectory(title='Open project',
                                     **the_kwargs)
    else:
        fp = os.path.abspath(path)
    if not fp or fp == '':
        return
    reset_everything()
    # repoint every project-relative path at the chosen directory
    image_fullpath.set(os.path.join(fp, 'images'))
    savedinterro_fullpath.set(os.path.join(fp, 'saved_interrogations'))
    conc_fullpath.set(os.path.join(fp, 'saved_concordances'))
    exported_fullpath.set(os.path.join(fp, 'exported'))
    corpora_fullpath.set(os.path.join(fp, 'data'))
    log_fullpath.set(os.path.join(fp, 'logs'))
    # saved_interrogations existing is the marker of a valid project
    if not os.path.isdir(savedinterro_fullpath.get()):
        timestring('Selected folder does not contain corpkit project.')
        return
    project_fullpath.set(fp)
    f = os.path.join(project_fullpath.get(), 'settings.ini')
    if os.path.isfile(f):
        load_config()
    os.chdir(fp)
    list_of_corpora = update_available_corpora()
    addbut.config(state=NORMAL)
    get_saved_results(kind='interrogation')
    get_saved_results(kind='concordance')
    get_saved_results(kind='image')
    open_proj_basepath.set('Loaded: "%s"' % os.path.basename(fp))
    # reset tool:
    root.title("corpkit: %s" % os.path.basename(fp))
    # check for parsed corpora
    if not current_corpus.get():
        parsed_corp = [d for d in list_of_corpora if d.endswith('-parsed')]
        # select
        first = False
        if len(parsed_corp) > 0:
            first = parsed_corp[0]
        if first:
            corpus_fullpath.set(os.path.abspath(first))
            name = make_corpus_name_from_abs(project_fullpath.get(), first)
            current_corpus.set(name)
        else:
            corpus_fullpath.set('')
            # no corpora, so go to build...
            note.focus_on(tab0)
    if corpus_fullpath.get() != '':
        try:
            subdrs = sorted([d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))])
        except FileNotFoundError:
            subdrs = []
    else:
        subdrs = []
    #lab.set('Concordancing: %s' % corpus_name)
    #pick_subcorpora['menu'].delete(0, 'end')
    #if len(subdrs) > 0:
    #    pick_subcorpora['menu'].add_command(label='all', command=_setit(subc_pick, 'all'))
    #    pick_subcorpora.config(state=NORMAL)
    #    for choice in subdrs:
    #        pick_subcorpora['menu'].add_command(label=choice, command=_setit(subc_pick, choice))
    #else:
    #    pick_subcorpora.config(state=NORMAL)
    #    pick_subcorpora['menu'].add_command(label='None', command=_setit(subc_pick, 'None'))
    #    pick_subcorpora.config(state=DISABLED)
    timestring('Project "%s" opened.' % os.path.basename(fp))
    note.progvar.set(0)
    #if corpus_name in list(corpus_names_and_speakers.keys()):
    refresh_by_metadata()
    #speakcheck.config(state=NORMAL)
    #else:
    #    pass
    #speakcheck.config(state=DISABLED)
    load_custom_list_json()
def view_query(kind=False):
    """Show the query options of the single selected interrogation.

    Builds a two-column table of option/value pairs in the manage popup,
    and a text box with the query string when one exists.
    """
    if len(manage_listbox_vals) == 0:
        return
    if len(manage_listbox_vals) > 1:
        timestring('Can only view one interrogation at a time.')
        return
    global frame_to_the_right
    frame_to_the_right = Frame(manage_pop)
    frame_to_the_right.grid(column=2, row=0, rowspan = 6)
    Label(frame_to_the_right, text='Query information', font=("Helvetica", 13, "bold")).grid(sticky=W, row=0, column=0, padx=(10,0))
    mlb = Table(frame_to_the_right, ['Option', 'Value'],
              column_weights=[1, 1], height=70, width=30)
    mlb.grid(sticky=N, column=0, row=1)
    for i in mlb._mlb.listboxes:
        i.config(height=29)
    mlb.columnconfig('Option', background='#afa')
    mlb.columnconfig('Value', background='#efe')
    q_dict = dict(all_interrogations[manage_listbox_vals[0]].query)
    mlb.clear()
    #show_query_vals.delete(0, 'end')
    flipped_trans = {v: k for k, v in list(transdict.items())}
    # dataframes are too big to display in the table
    for d in ['dataframe1', 'dataframe2']:
        q_dict.pop(d, None)
    for k, v in sorted(q_dict.items()):
        # normalise values to display strings; best-effort
        try:
            if isinstance(v, (int, float)) and v == 0:
                v = '0'
            if v is None:
                # bug fix: was `v == 'None'`, a no-op comparison, so None
                # values fell through and displayed as 'False'
                v = 'None'
            if not v:
                v = 'False'
            if v is True:
                v = 'True'
            # could be bad with threshold etc
            if v == 1:
                v = 'True'
        except:
            pass
        mlb.append([k, v])
    if q_dict.get('query'):
        qubox = Text(frame_to_the_right, font=("Courier New", 14), relief=SUNKEN,
                     wrap=WORD, width=40, height=5, undo=True)
        qubox.grid(column=0, row=2, rowspan = 1, padx=(10,0))
        qubox.delete(1.0, END)
        qubox.insert(END, q_dict['query'])
        manage_box['qubox'] = qubox
        bind_textfuncts_to_widgets([qubox])
    else:
        try:
            manage_box['qubox'].destroy()
        except:
            pass
# currently-selected names in the manage popup's listbox
manage_listbox_vals = []
def onselect_manage(evt):
    """Sync manage_listbox_vals with the manage listbox selection.

    evt: the <<ListboxSelect>> event; evt.widget is the listbox.
    """
    # bug fix: the old code popped while iterating the same list, which
    # skips every other element and could leave stale selections behind
    del manage_listbox_vals[:]
    wx = evt.widget
    for index in wx.curselection():
        value = wx.get(index)
        if value not in manage_listbox_vals:
            manage_listbox_vals.append(value)
# labels for the new/open project buttons, and which data kind the
# manage popup is currently operating on ('interrogation'/'concordance'/'image')
new_proj_basepath = StringVar()
new_proj_basepath.set('New project')
open_proj_basepath = StringVar()
open_proj_basepath.set('Open project')
the_current_kind = StringVar()
def manage_popup():
    """Open the 'Manage data' popup.

    Shows a listbox of saved interrogations/concordances/images with
    Save/View/Rename/Export/Remove/Delete buttons, and an OptionMenu that
    switches which kind of data is listed (see manage_callback below).
    """
    from tkinter import Toplevel
    global manage_pop
    manage_pop = Toplevel()
    manage_pop.geometry('+400+40')
    manage_pop.title("Manage data: %s" % os.path.basename(project_fullpath.get()))
    manage_pop.wm_attributes('-topmost', 1)
    manage_what = StringVar()
    manage_what.set('Manage: ')
    #Label(manage_pop, textvariable=manage_what).grid(row=0, column=0, sticky='W', padx=(5, 0))
    # listbox of saved items with scrollbar
    manag_frame = Frame(manage_pop, height=30)
    manag_frame.grid(column=0, row=1, rowspan = 1, columnspan=2, sticky='NW', padx=10)
    manage_scroll = Scrollbar(manag_frame)
    manage_scroll.pack(side=RIGHT, fill=Y)
    manage_listbox = Listbox(manag_frame, selectmode = SINGLE, height=30, width=30, relief=SUNKEN, bg='#F4F4F4',
                             yscrollcommand=manage_scroll.set, exportselection=False)
    manage_listbox.pack(fill=BOTH)
    manage_listbox.select_set(0)
    manage_scroll.config(command=manage_listbox.yview)
    xx = manage_listbox.bind('<<ListboxSelect>>', onselect_manage)
    # default: w option
    manage_listbox.select_set(0)
    the_current_kind.set('interrogation')
    #gtsv = StringVar()
    #gtsv.set('Get saved')
    #getbut = Button(manage_pop, textvariable=gtsv, command=lambda: get_saved_results(), width=22)
    #getbut.grid(row=2, column=0, columnspan=2)
    manage_type = StringVar()
    manage_type.set('Interrogations')
    # action buttons; each one dispatches on the currently managed kind
    #Label(manage_pop, text='Save selected: ').grid(sticky=E, row=6, column=1)
    savebut = Button(manage_pop, text='Save', command=lambda: save_one_or_more(kind = the_current_kind.get()))
    savebut.grid(padx=15, sticky=W, column=0, row=3)
    viewbut = Button(manage_pop, text='View', command=lambda: view_query(kind = the_current_kind.get()))
    viewbut.grid(padx=15, sticky=W, column=0, row=4)
    renamebut = Button(manage_pop, text='Rename', command=lambda: rename_one_or_more(kind = the_current_kind.get()))
    renamebut.grid(padx=15, sticky=W, column=0, row=5)
    #Checkbutton(manage_pop, text="Permanently", variable=perm, onvalue=True, offvalue=False).grid(column=1, row=16, padx=15, sticky=W)
    exportbut = Button(manage_pop, text='Export', command=lambda: export_interrogation(kind = the_current_kind.get()))
    exportbut.grid(padx=15, sticky=E, column=1, row=3)
    #Label(manage_pop, text='Remove selected: '()).grid(padx=15, sticky=W, row=4, column=0)
    removebut = Button(manage_pop, text='Remove', command= lambda: remove_one_or_more(kind = the_current_kind.get()))
    removebut.grid(padx=15, sticky=E, column=1, row=4)
    #Label(manage_pop, text='Delete selected: '()).grid(padx=15, sticky=E, row=5, column=1)
    deletebut = Button(manage_pop, text='Delete', command=lambda: del_one_or_more(kind = the_current_kind.get()))
    deletebut.grid(padx=15, sticky=E, column=1, row=5)
    to_manage = OptionMenu(manage_pop, manage_type, *tuple(('Interrogations', 'Concordances', 'Images')))
    to_manage.config(width=32, justify=CENTER)
    to_manage.grid(row=0, column=0, columnspan=2)
    def managed(*args):
        """Close the popup."""
        #vals = [i.get() for i in butvar.values() if i.get() is not False and i.get() != 0 and i.get() != '0']
        #vals = sorted(vals, key=lambda x:orders[x])
        #the_opts = ','.join(vals)]
        manage_pop.destroy()
    # rebind the module-level manage_callback to this popup's widgets
    try:
        del manage_callback
    except:
        pass
    global manage_callback
    def manage_callback(*args):
        import os
        """show correct listbox, enable disable buttons below"""
        # set text
        #manage_what.set('Manage %s' % manage_type.get().lower())
        #gtsv.set('Get saved %s' % manage_type.get().lower())
        # set correct action for buttons
        # 'Interrogations' -> 'interrogation' etc.
        the_current_kind.set(manage_type.get().lower().rstrip('s'))
        #get_saved_results(kind = the_current_kind.get())
        # enable all buttons
        #getbut.config(state=NORMAL)
        #try:
        savebut.config(state=NORMAL)
        viewbut.config(state=NORMAL)
        renamebut.config(state=NORMAL)
        exportbut.config(state=NORMAL)
        removebut.config(state=NORMAL)
        deletebut.config(state=NORMAL)
        # repopulate the listbox for the chosen kind, disabling buttons
        # that do not apply to it
        manage_listbox.delete(0, 'end')
        if the_current_kind.get() == 'interrogation':
            the_path = savedinterro_fullpath.get()
            the_ext = '.p'
            list_of_entries = list(all_interrogations.keys())
        elif the_current_kind.get() == 'concordance':
            the_path = conc_fullpath.get()
            the_ext = '.p'
            list_of_entries = list(all_conc.keys())
            viewbut.config(state=DISABLED)
            try:
                frame_to_the_right.destroy()
            except:
                pass
        elif the_current_kind.get() == 'image':
            the_path = image_fullpath.get()
            the_ext = '.png'
            refresh_images()
            list_of_entries = all_images
            viewbut.config(state=DISABLED)
            savebut.config(state=DISABLED)
            exportbut.config(state=DISABLED)
            removebut.config(state=DISABLED)
            try:
                frame_to_the_right.destroy()
            except:
                pass
        for datum in list_of_entries:
            manage_listbox.insert(END, datum)
        # colour entries by whether they exist on disk
        color_saved(manage_listbox, the_path, '#ccebc5', '#fbb4ae', ext = the_ext)
    manage_type.trace("w", manage_callback)
    manage_type.set('Interrogations')
############## ############## ############## ############## ##############
# BUILD TAB # # BUILD TAB # # BUILD TAB # # BUILD TAB # # BUILD TAB #
############## ############## ############## ############## ##############
from corpkit.build import download_large_file, get_corpus_filepaths, \
check_jdk, parse_corpus, move_parsed_files, corenlp_exists
def create_tokenised_text():
    """Tokenise the currently selected unparsed corpus via corpkit.

    Pops up the tokeniser options dialog, waits for it to close, then runs
    `Corpus.tokenise` with the GUI-selected options and registers the
    resulting corpus as current.
    """
    from corpkit.corpus import Corpus
    note.progvar.set(0)
    parser_options('tokenise')
    # block until the options popup is dismissed
    root.wait_window(poptions)
    if not clicked_done.get():
        return
    unparsed_corpus_path = corpus_fullpath.get()
    corp = Corpus(unparsed_corpus_path, print_info=False)
    parsed = corp.tokenise(postag=tokenise_pos,
                           lemmatise=tokenise_lem,
                           root=root,
                           stdout=sys.stdout,
                           note=note,
                           nltk_data_path=nltk_data_path,
                           speaker_segmentation=speakseg.get(),
                           metadata=parse_with_metadata.get())
    outdir = parsed.path
    current_corpus.set(parsed.name)
    # corpora without subfolders get bar charts by default
    subdrs = [d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(), d))]
    if len(subdrs) == 0:
        charttype.set('bar')
    timestring('Corpus parsed and ready to interrogate: "%s"' % os.path.basename(outdir))
    update_available_corpora()
def create_parsed_corpus():
    """Parse the currently selected corpus with Stanford CoreNLP.

    Prompts for parser options, verifies CoreNLP and Java JDK 1.8 are
    available (offering to download/install them if not), then runs
    `Corpus.parse` and registers the result as the current corpus.
    """
    import os
    import re
    import corpkit
    from corpkit.corpus import Corpus
    from corpkit.process import get_corenlp_path
    parser_options('parse')
    # block until the options popup is dismissed
    root.wait_window(poptions)
    if not clicked_done.get():
        return
    unparsed_corpus_path = corpus_fullpath.get()
    unparsed = Corpus(unparsed_corpus_path, print_info=False)
    note.progvar.set(0)
    unparsed_corpus_path = corpus_fullpath.get()
    corenlppath.set(get_corenlp_path(corenlppath.get()))
    if not corenlppath.get() or corenlppath.get() == 'None':
        downstall_nlp = messagebox.askyesno("CoreNLP not found.",
                                            "CoreNLP parser not found. Download/install it?")
        if not downstall_nlp:
            timestring('Cannot parse data without Stanford CoreNLP.')
            return
    jdk = check_jdk()
    if jdk is False:
        downstall_jdk = messagebox.askyesno("Java JDK", "You need Java JDK 1.8 to use CoreNLP.\n\nHit 'yes' to open web browser at download link. Once installed, corpkit should resume automatically")
        if downstall_jdk:
            import webbrowser
            webbrowser.open_new('http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html')
            import time
            timestring('Waiting for Java JDK 1.8 installation to complete.')
            # poll until the JDK becomes visible to check_jdk()
            while jdk is False:
                jdk = check_jdk()
                timestring('Waiting for Java JDK 1.8 installation to complete.')
                time.sleep(5)
        else:
            timestring('Cannot parse data without Java JDK 1.8.')
            return
    parsed = unparsed.parse(speaker_segmentation=speakseg.get(),
                            proj_path=project_fullpath.get(),
                            copula_head=True,
                            multiprocess=False,
                            corenlppath=corenlppath.get(),
                            operations=parser_opts.get(),
                            root=root,
                            stdout=sys.stdout,
                            note=note,
                            memory_mb=parser_memory.get(),
                            metadata=parse_with_metadata.get())
    if not parsed:
        print('Error during parsing.')
        # FIX: bail out here — the original fell through and crashed on
        # `parsed.name` when parsing failed. Restore stdout first.
        sys.stdout = note.redir
        return
    sys.stdout = note.redir
    current_corpus.set(parsed.name)
    # corpora without subfolders get bar charts by default
    subdrs = [d for d in os.listdir(corpus_fullpath.get()) if \
              os.path.isdir(os.path.join(corpus_fullpath.get(), d))]
    if len(subdrs) == 0:
        charttype.set('bar')
    update_available_corpora()
    timestring('Corpus parsed and ready to interrogate: "%s"' % parsed.name)
# Tk variables backing the build tab: button labels and corpus-selection state
parse_button_text = StringVar()
parse_button_text.set('Create parsed corpus')
tokenise_button_text = StringVar()
tokenise_button_text.set('Create tokenised corpus')
# absolute path of the most recently added unparsed corpus
path_to_new_unparsed_corpus = StringVar()
path_to_new_unparsed_corpus.set('')
add_corpus = StringVar()
add_corpus.set('')
add_corpus_button = StringVar()
add_corpus_button.set('Add corpus%s' % add_corpus.get())
# flag (0/1): 1 means the selected corpus has files but no subfolders
selected_corpus_has_no_subcorpora = IntVar()
selected_corpus_has_no_subcorpora.set(0)
def add_subcorpora_to_build_box(path_to_corpus):
    """Populate the build-tab subcorpora listbox for *path_to_corpus*.

    Lists immediate subdirectories; if there are none, shows a disabled
    placeholder entry and sets the no-subcorpora flag. Finishes by
    refreshing the file view via onselect_subc_build().
    """
    if not path_to_corpus:
        return
    import os
    subc_listbox_build.configure(state=NORMAL)
    subc_listbox_build.delete(0, 'end')
    sub_corpora = [d for d in os.listdir(path_to_corpus) if os.path.isdir(os.path.join(path_to_corpus, d))]
    if len(sub_corpora) == 0:
        selected_corpus_has_no_subcorpora.set(1)
        subc_listbox_build.bind('<<Modified>>', onselect_subc_build)
        subc_listbox_build.insert(END, 'No subcorpora found.')
        subc_listbox_build.configure(state=DISABLED)
    else:
        selected_corpus_has_no_subcorpora.set(0)
        for e in sub_corpora:
            subc_listbox_build.insert(END, e)
    # refresh the file listbox for the (possibly empty) selection
    onselect_subc_build()
def select_corpus():
    """selects corpus for viewing/parsing
    ---not used anymore"""
    # NOTE(review): dead code — `unparsed_corpus_path` is never defined in
    # this scope, so calling this function would raise NameError. Kept
    # verbatim as per the author's own "not used anymore" note.
    from os.path import join as pjoin
    from os.path import basename as bn
    tokenise_button_text.set('Tokenise: "%s"' % bn(unparsed_corpus_path))
    path_to_new_unparsed_corpus.set(unparsed_corpus_path)
    where_to_put_corpus = pjoin(project_fullpath.get(), 'data')
    sel_corpus.set(unparsed_corpus_path)
    parse_button_text.set('Parse: "%s"' % bn(unparsed_corpus_path))
    add_subcorpora_to_build_box(unparsed_corpus_path)
    timestring('Selected corpus: "%s"' % bn(unparsed_corpus_path))
def getcorpus():
    """copy unparsed texts to project folder

    Asks the user for a directory of text files, copies it into the
    project's 'data' folder, optionally turns loose files into
    one-file-per-subcorpus folders, re-encodes every file, and updates all
    GUI state to point at the new corpus.
    """
    import shutil
    import os
    from corpkit.process import saferead
    home = os.path.expanduser("~")
    docpath = os.path.join(home, 'Documents')
    # macOS file dialogs support an explanatory message; others do not
    if sys.platform == 'darwin':
        the_kwargs = {'message': 'Select your corpus of unparsed text files.'}
    else:
        the_kwargs = {}
    fp = filedialog.askdirectory(title='Path to unparsed corpus',
                                 initialdir=docpath,
                                 **the_kwargs)
    where_to_put_corpus = os.path.join(project_fullpath.get(), 'data')
    newc = os.path.join(where_to_put_corpus, os.path.basename(fp))
    try:
        shutil.copytree(fp, newc)
        timestring('Corpus copied to project folder.')
    except OSError:
        # empty basename means the dialog was cancelled: bail silently
        if os.path.basename(fp) == '':
            return
        timestring('"%s" already exists in project.' % os.path.basename(fp))
        return
    from corpkit.build import folderise, can_folderise
    if can_folderise(newc):
        do_folderise = messagebox.askyesno("No subcorpora found",
                                           "Your corpus contains multiple files, but no subfolders. " \
                                           "Would you like to treat each file as a subcorpus?")
        if do_folderise:
            folderise(newc)
            timestring('Turned files into subcorpora.')
    # re-write every file through OPENER so encodings are normalised
    for (rootdir, d, fs) in os.walk(newc):
        for f in fs:
            fpath = os.path.join(rootdir, f)
            data, enc = saferead(fpath)
            from corpkit.constants import OPENER, PYTHON_VERSION
            # NOTE(review): `f` (the filename) is shadowed by the file handle here
            with OPENER(fpath, "w") as f:
                if PYTHON_VERSION == 2:
                    f.write(data.encode('utf-8', errors='ignore'))
                else:
                    f.write(data)
    path_to_new_unparsed_corpus.set(newc)
    add_corpus_button.set('Added: "%s"' % os.path.basename(fp))
    current_corpus.set(os.path.basename(fp))
    timestring('Corpus copied to project folder.')
    parse_button_text.set('Parse: %s' % os.path.basename(newc))
    tokenise_button_text.set('Tokenise: "%s"' % os.path.basename(newc))
    add_subcorpora_to_build_box(newc)
    update_available_corpora()
    timestring('Selected corpus for viewing/parsing: "%s"' % os.path.basename(newc))
# Build-tab column 0: project buttons (new/open) plus add/parse/tokenise
# corpus buttons. The three corpus buttons start DISABLED and are wired to
# run their handlers through runner() so they show progress properly.
Label(tab0, text='Project', font=("Helvetica", 13, "bold")).grid(sticky=W, row=0, column=0)
Button(tab0, textvariable=new_proj_basepath, command=make_new_project, width=24).grid(row=1, column=0, sticky=W)
Button(tab0, textvariable=open_proj_basepath, command=load_project, width=24).grid(row=2, column=0, sticky=W)
addbut = Button(tab0, textvariable=add_corpus_button, width=24, state=DISABLED)
addbut.grid(row=3, column=0, sticky=W)
addbut.config(command=lambda: runner(addbut, getcorpus))
parsebut = Button(tab0, textvariable=parse_button_text, width=24, state=DISABLED)
parsebut.grid(row=5, column=0, sticky=W)
parsebut.config(command=lambda: runner(parsebut, create_parsed_corpus))
tokbut = Button(tab0, textvariable=tokenise_button_text, width=24, state=DISABLED)
tokbut.grid(row=6, column=0, sticky=W)
tokbut.config(command=lambda: runner(tokbut, create_tokenised_text))
def onselect_subc_build(evt=False):
    """get selected subcorpus, delete editor, show files in subcorpus

    Fired on <<ListboxSelect>> in the subcorpora listbox (or called
    directly with evt=False to refresh). Records the selection in
    `subc_sel_vals_build`, tears down any open editor widgets, and lists
    the text/XML/CoNLL files of the selected (sub)corpus in `f_view`.
    """
    import os
    if evt:
        # FIX: the original `for i in lst: lst.pop()` mutated the list
        # while iterating, clearing only half the entries when len > 1;
        # this empties it in place reliably.
        del subc_sel_vals_build[:]
        wx = evt.widget
        indices = wx.curselection()
        for index in indices:
            value = wx.get(index)
            if value not in subc_sel_vals_build:
                subc_sel_vals_build.append(value)
    # return for false click
    if len(subc_sel_vals_build) == 0 and selected_corpus_has_no_subcorpora.get() == 0:
        return
    # destroy editor and canvas if possible
    for ob in list(buildbits.values()):
        try:
            ob.destroy()
        except:
            pass
    f_view.configure(state=NORMAL)
    f_view.delete(0, 'end')
    newp = path_to_new_unparsed_corpus.get()
    if selected_corpus_has_no_subcorpora.get() == 0:
        newsub = os.path.join(newp, subc_sel_vals_build[0])
    else:
        newsub = newp
    fs = [f for f in os.listdir(newsub) if f.endswith('.txt') \
          or f.endswith('.xml') \
          or f.endswith('.conll') \
          or f.endswith('.conllu')]
    for e in fs:
        f_view.insert(END, e)
    if selected_corpus_has_no_subcorpora.get() == 0:
        f_in_s.set('Files in subcorpus: %s' % subc_sel_vals_build[0])
    else:
        f_in_s.set('Files in corpus: %s' % os.path.basename(path_to_new_unparsed_corpus.get()))
# a listbox of subcorpora (build tab, column 0), with attached scrollbar;
# starts DISABLED until a corpus is selected
Label(tab0, text='Subcorpora', font=("Helvetica", 13, "bold")).grid(row=7, column=0, sticky=W)
height = 21 if small_screen else 24
build_sub_f = Frame(tab0, width=24, height=height)
build_sub_f.grid(row=8, column=0, sticky=W, rowspan=2, padx=(8, 0))
build_sub_sb = Scrollbar(build_sub_f)
build_sub_sb.pack(side=RIGHT, fill=Y)
subc_listbox_build = Listbox(build_sub_f, selectmode=SINGLE, height=height, state=DISABLED, relief=SUNKEN, bg='#F4F4F4',
                             yscrollcommand=build_sub_sb.set, exportselection=False, width=24)
subc_listbox_build.pack(fill=BOTH)
xxy = subc_listbox_build.bind('<<ListboxSelect>>', onselect_subc_build)
subc_listbox_build.select_set(0)
build_sub_sb.config(command=subc_listbox_build.yview)
def show_a_tree(evt):
    """Draw the parse tree for the sentence selected in the sentences box.

    Looks up the bracketed tree string in `sentdict` by listbox index,
    parses it with nltk, and renders it on a new Canvas in tab0.
    """
    import os
    from nltk import Tree
    from nltk.tree import ParentedTree
    from nltk.draw.util import CanvasFrame
    from nltk.draw import TreeWidget
    sbox = buildbits['sentsbox']
    sent = sentdict[int(sbox.curselection()[0])]
    t = ParentedTree.fromstring(sent)
    # plain Canvas attached to tab0 (CanvasFrame not used)
    cf = Canvas(tab0, width=800, height=400, bd=5)
    buildbits['treecanvas'] = cf
    cf.grid(row=5, column=2, rowspan=11, padx=(0, 0))
    # track for later teardown by onselect_f
    if cf not in boxes:
        boxes.append(cf)
    # draw the tree and send to the frame's canvas
    tc = TreeWidget(cf, t, draggable=1,
                    node_font=('helvetica', -10, 'bold'),
                    leaf_font=('helvetica', -10, 'italic'),
                    roof_fill='white', roof_color='black',
                    leaf_color='green4', node_color='blue2')
    tc.bind_click_trees(tc.toggle_collapsed)
def select_all_editor(*args):
    """not currently using, but might be good for select all

    Selects the whole buffer of the build-tab editor and moves the cursor
    to the top; returns 'break' to stop Tk's default binding.
    """
    editor = buildbits['editor']
    editor.tag_add(SEL, "1.0", END)
    editor.mark_set(INSERT, "1.0")
    editor.see(INSERT)
    return 'break'
def onselect_f(evt):
    """get selected file and show in file view

    Fired on <<ListboxSelect>> in the files listbox. For a .txt file,
    opens an editable Text widget with save bindings; for a .conll/.conllu
    file, extracts the '# parse=' trees and lists flattened sentences in a
    Listbox wired to show_a_tree. All created widgets are tracked in
    `boxes`/`buildbits` so they can be destroyed on the next selection.
    """
    # clear widgets created by a previous selection
    for box in boxes:
        try:
            box.destroy()
        except:
            pass
    import os
    # should only be one selected file; empty the record then refill it
    # NOTE(review): this loop only clears half the list if len > 1 (pop
    # while iterating) — harmless here since selection is effectively single
    for i in chosen_f:
        chosen_f.pop()
    wx = evt.widget
    indices = wx.curselection()
    for index in indices:
        value = wx.get(index)
        if value not in chosen_f:
            chosen_f.append(value)
    if len(chosen_f) == 0:
        return
    if chosen_f[0].endswith('.txt'):
        newp = path_to_new_unparsed_corpus.get()
        if selected_corpus_has_no_subcorpora.get() == 0:
            fp = os.path.join(newp, subc_sel_vals_build[0], chosen_f[0])
        else:
            fp = os.path.join(newp, chosen_f[0])
        # fall back to the current corpus dir if the first guess is wrong
        if not os.path.isfile(fp):
            fp = os.path.join(newp, os.path.basename(corpus_fullpath.get()), chosen_f[0])
        from corpkit.constants import OPENER
        with OPENER(fp, 'r', encoding='utf-8') as fo:
            text = fo.read()
        # needs a scrollbar
        editor = Text(tab0, height=32)
        bind_textfuncts_to_widgets([editor])
        buildbits['editor'] = editor
        editor.grid(row=1, column=2, rowspan=9, pady=(10, 0), padx=(20, 0))
        if editor not in boxes:
            boxes.append(editor)
        all_text_widgets.append(editor)
        # ctrl/cmd-s saves the buffer back to disk
        editor.bind("<%s-s>" % key, savebuttonaction)
        editor.bind("<%s-S>" % key, savebuttonaction)
        editor.config(borderwidth=0,
                      font="{Lucida Sans Typewriter} 12",
                      wrap=WORD,  # use word wrapping
                      width=64,
                      undo=True,  # Tk 8.4
                      )
        editor.delete(1.0, END)
        editor.insert(END, text)
        editor.mark_set(INSERT, 1.0)
        editf.set('Edit file: %s' % chosen_f[0])
        viewedit = Label(tab0, textvariable=editf, font=("Helvetica", 13, "bold"))
        viewedit.grid(row=0, column=2, sticky=W, padx=(20, 0))
        if viewedit not in boxes:
            boxes.append(viewedit)
        filename.set(chosen_f[0])
        fullpath_to_file.set(fp)
        but = Button(tab0, text='Save changes', command=savebuttonaction)
        but.grid(row=9, column=2, sticky='SE')
        buildbits['but'] = but
        if but not in boxes:
            boxes.append(but)
    elif chosen_f[0].endswith('.conll') or chosen_f[0].endswith('.conllu'):
        import re
        # matches the '# parse=(...)' metadata line of each sentence
        parsematch = re.compile(r'^# parse=(.*)')
        newp = path_to_new_unparsed_corpus.get()
        if selected_corpus_has_no_subcorpora.get() == 0:
            fp = os.path.join(newp, subc_sel_vals_build[0], chosen_f[0])
        else:
            fp = os.path.join(newp, chosen_f[0])
        if not os.path.isfile(fp):
            fp = os.path.join(newp, os.path.basename(corpus_fullpath.get()), chosen_f[0])
        from corpkit.constants import OPENER
        with OPENER(fp, 'r', encoding='utf-8') as fo:
            text = fo.read()
        lines = text.splitlines()
        editf.set('View trees: %s' % chosen_f[0])
        vieweditxml = Label(tab0, textvariable=editf, font=("Helvetica", 13, "bold"))
        vieweditxml.grid(row=0, column=2, sticky=W, padx=(20, 0))
        buildbits['vieweditxml'] = vieweditxml
        if vieweditxml not in boxes:
            boxes.append(vieweditxml)
        trees = []
        def flatten_treestring(tree):
            """Strip bracket/label markup from a tree string, then undo
            tokenisation spacing to get a readable sentence."""
            # NOTE(review): the final mapping looks like it should map a
            # double space to a single space — confirm against upstream
            replaces = {'$ ': '$',
                        '`` ': '``',
                        ' ,': ',',
                        ' .': '.',
                        "'' ": "''",
                        " n't": "n't",
                        " 're": "'re",
                        " 'm": "'m",
                        " 's": "'s",
                        " 'd": "'d",
                        " 'll": "'ll",
                        ' ': ' '}
            import re
            tree = re.sub(r'\(.*? ', '', tree).replace(')', '')
            for k, v in replaces.items():
                tree = tree.replace(k, v)
            return tree
        for l in lines:
            searched = re.search(parsematch, l)
            if searched:
                bracktree = searched.group(1)
                flat = flatten_treestring(bracktree)
                trees.append([bracktree, flat])
        sentsbox = Listbox(tab0, selectmode=SINGLE, width=120, font=("Courier New", 11))
        if sentsbox not in boxes:
            boxes.append(sentsbox)
        buildbits['sentsbox'] = sentsbox
        sentsbox.grid(row=1, column=2, rowspan=4, padx=(20, 0))
        sentsbox.delete(0, END)
        # reset the index -> bracketed-tree mapping used by show_a_tree
        for i in list(sentdict.keys()):
            del sentdict[i]
        for i, (t, f) in enumerate(trees):
            # NOTE(review): `cutshort` is computed but the full string `f`
            # is inserted — possibly an unapplied truncation
            cutshort = f[:80] + '...'
            sentsbox.insert(END, '%d: %s' % (i + 1, f))
            sentdict[i] = t
        xxyyz = sentsbox.bind('<<ListboxSelect>>', show_a_tree)
# label text above the file listbox ('Files in subcorpus: X')
f_in_s = StringVar()
f_in_s.set('Files in subcorpus ')
# a listbox of files (build tab, column 1), with scrollbar; disabled until
# a corpus is chosen
Label(tab0, textvariable=f_in_s, font=("Helvetica", 13, "bold")).grid(row=0, column=1, sticky='NW', padx=(30, 0))
height = 31 if small_screen else 36
build_f_box = Frame(tab0, height=height)
build_f_box.grid(row=1, column=1, rowspan=9, padx=(20, 0), pady=(10, 0))
build_f_sb = Scrollbar(build_f_box)
build_f_sb.pack(side=RIGHT, fill=Y)
f_view = Listbox(build_f_box, selectmode=EXTENDED, height=height, state=DISABLED, relief=SUNKEN, bg='#F4F4F4',
                 exportselection=False, yscrollcommand=build_f_sb.set)
f_view.pack(fill=BOTH)
xxyy = f_view.bind('<<ListboxSelect>>', onselect_f)
f_view.select_set(0)
build_f_sb.config(command=f_view.yview)
# heading above the editor widget, updated by onselect_f
editf = StringVar()
editf.set('Edit file: ')
def savebuttonaction(*args):
    """Write the build-tab editor's buffer back to the file on disk.

    Bound both to the 'Save changes' button and to ctrl/cmd-s in the
    editor. Strips trailing whitespace and ensures a final newline.
    """
    from corpkit.constants import OPENER, PYTHON_VERSION
    editor = buildbits['editor']
    text = editor.get(1.0, END)
    with OPENER(fullpath_to_file.get(), "w") as fo:
        # py2 needs explicit utf-8 encoding before writing
        if PYTHON_VERSION == 2:
            fo.write(text.rstrip().encode("utf-8"))
            fo.write("\n")
        else:
            fo.write(text.rstrip() + '\n')
    timestring('%s saved.' % filename.get())
# name and absolute path of the file currently open in the build-tab editor
filename = StringVar()
filename.set('')
fullpath_to_file = StringVar()
fullpath_to_file.set('')
############ ############ ############ ############ ############
# MENU BAR # # MENU BAR # # MENU BAR # # MENU BAR # # MENU BAR #
############ ############ ############ ############ ############
# set to 1 by quitfunc so the mainloop wrapper knows this is a real quit
realquit = IntVar()
realquit.set(0)
def clear_all():
    """Wipe all GUI state by replacing this process with a fresh copy of
    the running script (same interpreter, same argv)."""
    import os
    import sys
    os.execl(sys.executable, sys.executable, *sys.argv)
def get_tool_pref_file():
    """get the location of the tool preferences files

    Returns the path of 'tool_settings.ini' inside the resource dir `rd`.
    """
    return os.path.join(rd, 'tool_settings.ini')
def save_tool_prefs(printout=True):
    """save any preferences to tool preferences

    Serialises the GUI-wide Tk preference variables into the INI file
    returned by get_tool_pref_file().

    :param printout: log a confirmation line via timestring when True
    """
    # py2/py3 configparser compatibility
    try:
        import configparser
    except:
        import ConfigParser as configparser
    import os
    Config = configparser.ConfigParser()
    settingsfile = get_tool_pref_file()
    if settingsfile is None:
        timestring('No settings file found.')
        return
    Config.add_section('Projects')
    # keep only the five most recent projects, semicolon-separated
    Config.set('Projects', 'most recent', ';'.join(most_recent_projects[-5:]).lstrip(';'))
    Config.add_section('CoreNLP')
    Config.set('CoreNLP', 'Parser path', corenlppath.get())
    Config.set('CoreNLP', 'Memory allocation', str(parser_memory.get()))
    Config.add_section('Appearance')
    Config.set('Appearance', 'Spreadsheet row header width', str(row_label_width.get()))
    Config.set('Appearance', 'Spreadsheet cell width', str(cell_width.get()))
    Config.add_section('Other')
    Config.set('Other', 'Truncate concordance lines', str(truncate_conc_after.get()))
    Config.set('Other', 'Truncate spreadsheets', str(truncate_spreadsheet_after.get()))
    Config.set('Other', 'Automatic update check', str(do_auto_update.get()))
    Config.set('Other', 'do concordancing', str(do_concordancing.get()))
    Config.set('Other', 'Only format middle concordance column', str(only_format_match.get()))
    Config.set('Other', 'p value', str(p_val.get()))
    # NOTE(review): handle is never closed explicitly — consider `with open`
    cfgfile = open(settingsfile, 'w')
    Config.write(cfgfile)
    if printout:
        timestring('Tool preferences saved.')
def load_tool_prefs():
    """load preferences

    Reads tool_settings.ini (if present) and pushes each stored value into
    the corresponding Tk variable, tolerating missing options.
    """
    import os
    # py2/py3 configparser compatibility
    try:
        import configparser
    except:
        import ConfigParser as configparser
    settingsfile = get_tool_pref_file()
    if settingsfile is None:
        timestring('No settings file found.')
        return
    if not os.path.isfile(settingsfile):
        timestring('No settings file found at %s' % settingsfile)
        return
    def tryer(config, var, section, name):
        """attempt to load a value, fail gracefully if not there

        NOTE(review): non-numeric options pass through bool(), and
        bool('False') is True — verify stored falsy values round-trip;
        'p value' is parsed with int() although it looks fractional, so a
        failure there is silently swallowed by the bare except.
        """
        try:
            if config.has_option(section, name):
                bit = conmap(config, section).get(name, False)
                if name in ['memory allocation', 'truncate spreadsheets',
                            'truncate concordance lines', 'p value']:
                    bit = int(bit)
                else:
                    bit = bool(bit)
                var.set(bit)
        except:
            pass
    Config = configparser.ConfigParser()
    Config.read(settingsfile)
    tryer(Config, parser_memory, "CoreNLP", "memory allocation")
    tryer(Config, do_auto_update, "Other", 'automatic update check')
    tryer(Config, only_format_match, "Other", 'only format middle concordance column')
    tryer(Config, do_concordancing, "Other", 'do concordancing')
    tryer(Config, truncate_conc_after, "Other", 'truncate concordance lines')
    tryer(Config, truncate_spreadsheet_after, "Other", 'truncate spreadsheets')
    tryer(Config, p_val, "Other", 'p value')
    try:
        parspath = conmap(Config, "CoreNLP")['parser path']
    except:
        parspath = 'default'
    try:
        mostrec = conmap(Config, "Projects")['most recent'].lstrip(';').split(';')
        for i in mostrec:
            most_recent_projects.append(i)
    except:
        pass
    # 'default'/empty parser path falls back to ~/corenlp
    if parspath == 'default' or parspath == '':
        corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
    else:
        corenlppath.set(parspath)
    timestring('Tool preferences loaded.')
def save_config():
    """Persist the current project's GUI settings to settings.ini.

    Writes corpus path, speaker info, visualisation and concordance options
    into <project>/settings.ini.
    """
    # py2/py3 configparser compatibility
    try:
        import configparser
    except:
        import ConfigParser as configparser
    import os
    # join any non-empty coding-scheme entry boxes, stripping commas
    if any(v != '' for v in list(entryboxes.values())):
        codscheme = ','.join([i.get().replace(',', '') for i in list(entryboxes.values())])
    else:
        codscheme = None
    Config = configparser.ConfigParser()
    Config.add_section('Build')
    Config.add_section('Interrogate')
    # store the corpus path relative to the project root
    relcorpuspath = corpus_fullpath.get().replace(project_fullpath.get(), '').lstrip('/')
    Config.set('Interrogate', 'Corpus path', relcorpuspath)
    Config.set('Interrogate', 'Speakers', convert_speakdict_to_string(corpus_names_and_speakers))
    Config.set('Interrogate', 'Treat files as subcorpora', str(files_as_subcorpora.get()))
    Config.add_section('Edit')
    Config.add_section('Visualise')
    Config.set('Visualise', 'Plot style', plot_style.get())
    Config.set('Visualise', 'Use TeX', str(texuse.get()))
    Config.set('Visualise', 'x axis title', x_axis_l.get())
    Config.set('Visualise', 'Colour scheme', chart_cols.get())
    Config.add_section('Concordance')
    Config.set('Concordance', 'font size', str(fsize.get()))
    Config.set('Concordance', 'coding scheme', codscheme)
    # 'Window' placeholder means the default width of 70
    if win.get() == 'Window':
        window = 70
    else:
        window = int(win.get())
    Config.set('Concordance', 'window', str(window))
    Config.add_section('Manage')
    Config.set('Manage', 'Project path', project_fullpath.get())
    # FIX: the original opened the file and never closed it, so the
    # settings could stay unflushed; the context manager guarantees close.
    with open(os.path.join(project_fullpath.get(), 'settings.ini'), 'w') as cfgfile:
        Config.write(cfgfile)
    timestring('Project settings saved to settings.ini.')
def quitfunc():
    """Window-close handler: optionally save settings, then quit the loop."""
    if in_a_project.get() == 1:
        save_ask = messagebox.askyesno("Save settings",
                                       "Save settings before quitting?")
        if save_ask:
            save_config()
            save_tool_prefs()
    # mark this as an intentional quit before stopping the mainloop
    realquit.set(1)
    root.quit()
root.protocol("WM_DELETE_WINDOW", quitfunc)
def restart(newpath=False):
    """restarts corpkit .py or gui, designed for version updates

    Launches *newpath* (defaults to this very script) as a new process,
    then tears down the splash screen and exits the current process.
    """
    import sys
    import os
    import subprocess
    import inspect
    timestring('Restarting ... ')
    # get path to current script when no explicit target given
    if newpath is False:
        newpath = inspect.getfile(inspect.currentframe())
    if sys.platform == "win32":
        if newpath.endswith('.py'):
            timestring('Not yet supported, sorry.')
            return
        os.startfile(newpath)
    else:
        # 'open' on macOS, 'xdg-open' on linux; plain python for .py scripts
        opener = "open" if sys.platform == "darwin" else "xdg-open"
        if newpath.endswith('.py'):
            opener = 'python'
            # developer machine special case
            if 'daniel/Work/corpkit' in newpath:
                opener = '/Users/daniel/virtenvs/ssled/bin/python'
            cmd = [opener, newpath]
        else:
            if sys.platform == "darwin":
                # -n forces a new app instance
                cmd = [opener, '-n', newpath]
            else:
                cmd = [opener, newpath]
        from time import sleep
        sleep(1)
        subprocess.Popen(cmd)
    try:
        the_splash.__exit__()
    except:
        pass
    root.quit()
    sys.exit()
def untar(fname, extractto):
    """untar a file

    Extracts every member of the archive *fname* into directory
    *extractto*. FIX: uses a context manager so the archive handle is
    closed even if extraction raises (the original leaked it on error).
    """
    import tarfile
    with tarfile.open(fname) as tar:
        tar.extractall(extractto)
def update_corpkit(stver):
    """get new corpkit, delete this one, open it up

    Downloads the corpkit-<stver> tarball from GitHub, extracts it, and
    either swaps it in and restarts immediately, or leaves the new copy
    next to the old one (suffixed '-new') for later.

    :param stver: version string of the release to fetch, e.g. '2.3.4'
    """
    import sys
    import os
    import inspect
    import corpkit
    from corpkit.build import download_large_file
    # get path to this script
    corpath = rd
    # check we're using executable version, because .py users can
    # use github to update
    extens = '.%s' % fext
    if extens not in corpath and sys.platform != 'darwin':
        timestring("Get it from GitHub: https://www.github.com/interrogator/corpkit")
        return
    # split on .app or .exe, then re-add .app
    apppath = corpath.split(extens, 1)[0] + extens
    appdir = os.path.dirname(apppath)
    # get new version and the abs path of the download dir and the tar file
    url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/corpkit-%s.tar.gz' % stver
    path_to_app_parent = sys.argv[0]
    if sys.platform == 'darwin':
        if '.app' in path_to_app_parent:
            path_to_app_parent = os.path.dirname(path_to_app_parent.split('.app', 1)[0])
    else:
        # WINDOWS SUPPORT
        pass
    if '.py' in path_to_app_parent:
        py_script = True
        path_to_app_parent = os.path.dirname(os.path.join(path_to_app_parent.split('.py', 1)[0]))
    downloaded_dir, corpkittarfile = download_large_file(path_to_app_parent, \
        url, root=root, note=note, actually_download=True)
    timestring('Extracting update ...')
    untar(corpkittarfile, downloaded_dir)
    timestring('Applying update ...')
    # get whatever the new app is called
    newappfname = [f for f in os.listdir(downloaded_dir) if f.endswith(fext)][0]
    absnewapp = os.path.join(downloaded_dir, newappfname)
    restart_now = messagebox.askyesno("Update and restart",
                                      "Restart now?\n\nThis will delete the current version of corpkit.")
    import shutil
    if restart_now:
        # remove this very app, but not script, just in case
        if '.py' not in apppath:
            if sys.platform == 'darwin':
                # macOS .app bundles are directories
                shutil.rmtree(apppath)
            else:
                os.remove(apppath)
        # move new version into place
        if sys.platform == 'darwin':
            shutil.copytree(absnewapp, os.path.join(appdir, newappfname))
        else:
            shutil.copy(absnewapp, os.path.join(appdir, newappfname))
        # delete downloaded file and dir, then relaunch
        shutil.rmtree(downloaded_dir)
        restart(os.path.join(appdir, newappfname))
    else:
        # keep the new copy alongside; fall back to a '-new' suffix on clash
        if sys.platform == 'darwin':
            try:
                shutil.copytree(absnewapp, os.path.join(appdir, newappfname))
            except OSError:
                shutil.copytree(absnewapp, os.path.join(appdir, newappfname + '-new'))
        else:
            try:
                shutil.copy(absnewapp, os.path.join(appdir, newappfname))
            except OSError:
                shutil.copy(absnewapp, os.path.join(appdir, newappfname + '-new'))
        timestring('New version in %s' % os.path.join(appdir, newappfname + '-new'))
        return
def make_float_from_version(ver):
    """take a version string and turn it into a comparable float

    Keeps the first dot and drops the rest, so '2.3.4' -> 2.34.
    """
    text = str(ver)
    first_dot = text.find('.')
    if first_dot == -1:
        return float(text)
    # everything after the first dot becomes the fractional part
    mantissa = text[first_dot + 1:].replace('.', '')
    return float(text[:first_dot] + '.' + mantissa)
def modification_date(filename):
    """get datetime of file modification"""
    import os
    import datetime
    # convert the mtime (epoch seconds) straight to a local datetime
    return datetime.datetime.fromtimestamp(os.path.getmtime(filename))
def check_updates(showfalse=True, lateprint=False, auto=False):
    """check for updates, minor and major.

    Major update: a newer corpkit-X.tar.gz title on the corpkit-app GitHub
    page. Minor update: a newer '# <updated>' date in the remote gui.py.
    Either way the user is asked before downloading/applying.

    :param showfalse: show message boxes for 'up to date'/errors
    :param lateprint: unused here; passed by callers — TODO confirm intent
    :param auto: True when triggered by the startup timer (respects the
        'automatic update check' preference)
    """
    import os
    import re
    import datetime
    from dateutil.parser import parse
    import sys
    import shutil
    if noupdate:
        return
    # weird hacky way to not repeat request
    if do_auto_update.get() == 0 and auto is True:
        return
    if do_auto_update_this_session.get() is False and auto is True:
        return
    # cancel auto if manual
    if auto is False:
        do_auto_update_this_session.set(0)
    # get installed version as float (VERSION.txt, else module attribute)
    try:
        oldstver = open(os.path.join(rd, 'VERSION.txt'), 'r').read().strip()
    except:
        import corpkit
        oldstver = str(corpkit.__version__)
    ver = make_float_from_version(oldstver)
    # check for major update
    try:
        response = requests.get('https://www.github.com/interrogator/corpkit-app', verify=False)
        html = response.text
    except:
        if showfalse:
            messagebox.showinfo(
                "No connection to remote server",
                "Could not connect to remote server.")
        return
    reg = re.compile('title=.corpkit-([0-9\.]+)\.tar\.gz')
    # get version number as string
    stver = str(re.search(reg, html).group(1))
    vnum = make_float_from_version(stver)
    # check for major update
    if vnum > ver:
        timestring('Update found: corpkit %s' % stver)
        download_update = messagebox.askyesno("Update available",
                                              "Update available: corpkit %s\n\n Download now?" % stver)
        if download_update:
            update_corpkit(stver)
            return
        else:
            timestring('Update found: corpkit %s. Not downloaded.' % stver)
            return
    # check for minor update
    else:
        import sys
        timereg = re.compile(r'# <updated>(.*)<.updated>')
        oldd = open(os.path.join(rd, 'gui.py'), 'r').read()
        dateline = next(l for l in oldd.split('\n') if l.startswith('# <updated>'))
        dat = re.search(timereg, dateline).group(1)
        try:
            olddate = parse(dat)
        except:
            # fall back to the script file's mtime
            olddate = modification_date(sys.argv[0])
        try:
            script_response = requests.get('https://raw.githubusercontent.com/interrogator/corpkit-app/master/gui.py', verify=False)
            newscript = script_response.text
            dateline = next(l for l in newscript.split('\n') if l.startswith('# <updated>'))
        except:
            if showfalse:
                messagebox.showinfo(
                    "No connection to remote server",
                    "Could not connect to remote server.")
            return
        # parse the date part of the remote marker
        try:
            dat = re.search(timereg, dateline).group(1)
            newdate = parse(dat)
        except:
            if showfalse:
                messagebox.showinfo(
                    "Error checking for update.",
                    "Error checking for update.")
            return
        if newdate > olddate:
            timestring('Minor update found: corpkit %s' % stver)
            download_update = messagebox.askyesno("Minor update available",
                                                  "Minor update available: corpkit %s\n\n Download and apply now?" % stver)
            if download_update:
                url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/corpkit-%s' % oldstver
                # update the gui script (refuse to clobber a dev checkout)
                if not sys.argv[0].endswith('gui.py'):
                    script_url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/gui.py'
                    response = requests.get(script_url, verify=False)
                    with open(os.path.join(rd, 'gui.py'), "w") as fo:
                        fo.write(response.text)
                else:
                    timestring("Can't replace developer copy, sorry.")
                    return
                dir_containing_ex, execut = download_large_file(project_fullpath.get(),
                                                                url=url, root=root, note=note)
                # make sure we can execute the new script
                import os
                os.chmod(execut, 0o777)
                if not sys.argv[0].endswith('gui.py'):
                    os.remove(os.path.join(rd, 'corpkit-%s' % oldstver))
                    shutil.move(execut, os.path.join(rd, 'corpkit-%s' % oldstver))
                    shutil.rmtree(dir_containing_ex)
                else:
                    timestring("Can't replace developer copy, sorry.")
                    return
                restart(sys.argv[0].split('.app', 1)[0] + '.app')
                return
            else:
                timestring('Minor update found: corpkit %s, %s. Not downloaded.' % (stver, dat.replace('T', ', ')))
                return
    if showfalse:
        messagebox.showinfo(
            "Up to date!",
            "corpkit (version %s) up to date!" % oldstver)
        timestring('corpkit (version %s) up to date.' % oldstver)
    return
def start_update_check():
    """Run the automatic update check; on any failure, grey out the menu
    entry so the user can't retry a broken check."""
    if noupdate:
        return
    try:
        check_updates(showfalse=False, lateprint=True, auto=True)
    except:
        filemenu.entryconfig("Check for updates", state="disabled")
def unmax():
    """stop it being always on top"""
    root.attributes('-topmost', False)
# drop the always-on-top attribute shortly after startup
root.after(1000, unmax)
# schedule the auto update check only for bundled (non-.py) builds
if not '.py' in sys.argv[0]:
    root.after(10000, start_update_check)
def set_corenlp_path():
    """Ask the user for the CoreNLP install folder and store it.

    Warns (without reverting the setting) when the chosen folder does not
    contain the CoreNLP jars; saves tool prefs when it does.
    """
    # macOS file dialogs support an explanatory message; others do not
    if sys.platform == 'darwin':
        the_kwargs = {'message': 'Select folder containing the CoreNLP parser.'}
    else:
        the_kwargs = {}
    fp = filedialog.askdirectory(title='CoreNLP path',
                                 initialdir=os.path.expanduser("~"),
                                 **the_kwargs)
    if fp and fp != '':
        corenlppath.set(fp)
        if not get_fullpath_to_jars(corenlppath):
            recog = messagebox.showwarning(title='CoreNLP not found',
                                           message="CoreNLP not found in %s." % fp)
            timestring("CoreNLP not found in %s." % fp)
        else:
            save_tool_prefs()
def config_menu(*args):
    """Rebuild the File menu just before it is shown (Tk postcommand).

    Refreshes the recent-projects submenu, the selectable-corpora submenu,
    and enables/disables entries according to project state.
    """
    import os
    fp = corpora_fullpath.get()
    recentmenu.delete(0, END)
    # FIX: the original's first `if len(...) == 0: disable` was immediately
    # overridden by an independent if/else that re-enabled the entry for an
    # empty list; fold both empty-ish cases into one condition.
    if not most_recent_projects or \
            (len(most_recent_projects) == 1 and most_recent_projects[0] == ''):
        filemenu.entryconfig("Open recent project", state="disabled")
    else:
        filemenu.entryconfig("Open recent project", state="normal")
        # newest first, at most five, skipping empty strings
        for c in list(set(most_recent_projects[::-1][:5])):
            if c:
                lab = os.path.join(os.path.basename(os.path.dirname(c)), os.path.basename(c))
                recentmenu.add_radiobutton(label=lab, variable=recent_project, value=c)
    if os.path.isdir(fp):
        all_corpora = get_all_corpora()
        if len(all_corpora) > 0:
            filemenu.entryconfig("Select corpus", state="normal")
            selectmenu.delete(0, END)
            for c in all_corpora:
                selectmenu.add_radiobutton(label=c, variable=current_corpus, value=c)
        else:
            filemenu.entryconfig("Select corpus", state="disabled")
    else:
        filemenu.entryconfig("Select corpus", state="disabled")
    # project-scoped entries only make sense inside a project
    if in_a_project.get() == 0:
        filemenu.entryconfig("Save project settings", state="disabled")
        filemenu.entryconfig("Load project settings", state="disabled")
        filemenu.entryconfig("Manage project", state="disabled")
    else:
        filemenu.entryconfig("Save project settings", state="normal")
        filemenu.entryconfig("Load project settings", state="normal")
        filemenu.entryconfig("Manage project", state="normal")
# --- build the menu bar; order of add_command calls defines menu order ---
menubar = Menu(root)
selectmenu = Menu(root)
recentmenu = Menu(root)

# on macOS the 'apple' name integrates the menu with the system menu bar
if sys.platform == 'darwin':
    filemenu = Menu(menubar, tearoff=0, name='apple', postcommand=config_menu)
else:
    filemenu = Menu(menubar, tearoff=0, postcommand=config_menu)

filemenu.add_command(label="New project", command=make_new_project)
filemenu.add_command(label="Open project", command=load_project)
filemenu.add_cascade(label="Open recent project", menu=recentmenu)
filemenu.add_cascade(label="Select corpus", menu=selectmenu)
filemenu.add_separator()
filemenu.add_command(label="Save project settings", command=save_config)
filemenu.add_command(label="Load project settings", command=load_config)
filemenu.add_separator()
filemenu.add_command(label="Save tool preferences", command=save_tool_prefs)
filemenu.add_separator()
filemenu.add_command(label="Manage project", command=manage_popup)
filemenu.add_separator()
#filemenu.add_command(label="Coding scheme print", command=print_entryboxes)
# broken on deployed version ... path to self stuff
#filemenu.add_separator()
filemenu.add_command(label="Check for updates", command=check_updates)
#filemenu.entryconfig("Check for updates", state="disabled")
#filemenu.add_separator()
#filemenu.add_command(label="Restart tool", command=restart)
filemenu.add_separator()
#filemenu.add_command(label="Exit", command=quitfunc)
menubar.add_cascade(label="File", menu=filemenu)

# platform-specific extra menus (Window on macOS, System elsewhere)
if sys.platform == 'darwin':
    windowmenu = Menu(menubar, name='window')
    menubar.add_cascade(menu=windowmenu, label='Window')
else:
    sysmenu = Menu(menubar, name='system')
    menubar.add_cascade(menu=sysmenu)
def schemesshow(*args):
    """only edit schemes once in project"""
    import os
    # both scheme editors are available only once a project is open
    entry_state = "disabled" if project_fullpath.get() == '' else "normal"
    schemenu.entryconfig("Wordlists", state=entry_state)
    schemenu.entryconfig("Coding scheme", state=entry_state)
# Schemes menu: availability of its entries is refreshed on open
schemenu = Menu(menubar, tearoff=0, postcommand=schemesshow)
menubar.add_cascade(label="Schemes", menu=schemenu)
schemenu.add_command(label="Coding scheme", command=codingschemer)
schemenu.add_command(label="Wordlists", command=custom_lists)

# preferences section (native macOS Preferences menu item)
if sys.platform == 'darwin':
    root.createcommand('tk::mac::ShowPreferences', preferences_popup)
def about_box():
    """About message with current corpkit version.

    Prefers the VERSION.txt shipped next to a deployed build (``rd``);
    falls back to the installed package's ``__version__``.
    """
    import os
    try:
        # use a context manager so the file handle is closed
        # (the original open(...).read() leaked it)
        with open(os.path.join(rd, 'VERSION.txt'), 'r') as fo:
            oldstver = str(fo.read().strip())
    except Exception:
        # no VERSION.txt (e.g. running from source): ask the package
        import corpkit
        oldstver = str(corpkit.__version__)
    messagebox.showinfo('About', 'corpkit %s\n\ninterrogator.github.io/corpkit\ngithub.com/interrogator/corpkit\npypi.python.org/pypi/corpkit\n\n' \
                        'Creator: Daniel McDonald\nmcdonaldd@unimelb.edu.au' % oldstver)
def show_log():
    """save log text as txt file and open it"""
    import os
    the_input = '\n'.join([x for x in note.log_stream])
    #the_input = note.text.get("1.0",END)
    # find the first free log-NN.txt filename
    c = 0
    logpath = os.path.join(log_fullpath.get(), 'log-%s.txt' % str(c).zfill(2))
    while os.path.isfile(logpath):
        c += 1
        logpath = os.path.join(log_fullpath.get(), 'log-%s.txt' % str(c).zfill(2))
    with open(logpath, "w") as fo:
        fo.write(the_input)
    prnt = os.path.join('logs', os.path.basename(logpath))
    timestring('Log saved to "%s".' % prnt)
    import sys
    if sys.platform == "win32":
        os.startfile(logpath)
    else:
        # Bug fix: the platform-appropriate opener was computed but then
        # 'open' was hard-coded in the subprocess call, which fails on
        # Linux. Use the computed opener ('open' on macOS, 'xdg-open'
        # elsewhere).
        opener = "open" if sys.platform == "darwin" else "xdg-open"
        import subprocess
        subprocess.call([opener, logpath])
def bind_textfuncts_to_widgets(lst):
    """add basic cut copy paste to text entry widgets

    Binds select-all / paste / cut / copy (both lower- and upper-case
    variants of the accelerator) to every widget in *lst* and enables
    undo where the widget supports it.
    """
    # one (character, handler) table instead of eight near-identical binds
    handlers = (('a', select_all_text),
                ('v', paste_into_textwidget),
                ('x', cut_from_textwidget),
                ('c', copy_from_textwidget))
    for i in lst:
        for char, handler in handlers:
            i.bind("<%s-%s>" % (key, char), handler)
            i.bind("<%s-%s>" % (key, char.upper()), handler)
        try:
            i.config(undo = True)
        except Exception:
            # not every widget supports undo; ignore only that failure
            # rather than swallowing everything with a bare except
            pass
# load preferences
load_tool_prefs()

# Help menu
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help", command=lambda: show_help('h'))
helpmenu.add_command(label="Query writing", command=lambda: show_help('q'))
helpmenu.add_command(label="Troubleshooting", command=lambda: show_help('t'))
helpmenu.add_command(label="Save log", command=show_log)
#helpmenu.add_command(label="Set CoreNLP path", command=set_corenlp_path)
helpmenu.add_separator()
helpmenu.add_command(label="About", command=about_box)
menubar.add_cascade(label="Help", menu=helpmenu)

if sys.platform == 'darwin':
    import corpkit
    import subprocess
    ver = corpkit.__version__
    corpath = os.path.dirname(corpkit.__file__)
    # only for deployed .app builds, not source/system installs:
    # ask Finder to bring the app window to the front
    if not corpath.startswith('/Library/Python') and not 'corpkit/corpkit/corpkit' in corpath:
        try:
            subprocess.call('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "corpkit-%s" to true' ''' % ver, shell = True)
        except:
            pass

# attach the finished menu bar and show the main window
root.config(menu=menubar)
note.focus_on(tab1)
if loadcurrent:
    load_project(loadcurrent)
root.deiconify()
root.lift()
try:
    # close the startup splash screen, if one was shown
    root._splash.__exit__()
except:
    pass
root.wm_state('normal')
#root.resizable(TRUE,TRUE)
# overwrite quitting behaviour, prompt to save settings
root.createcommand('exit', quitfunc)
root.mainloop()
if __name__ == "__main__":
    # the traceback is mostly for debugging pyinstaller errors
    import sys
    import traceback
    import os

    # a trailing directory argument is treated as a project to open on launch
    lc = sys.argv[-1] if os.path.isdir(sys.argv[-1]) else False
    #if lc and sys.argv[-1] == '.':
    #    lc = os.path.basename(os.getcwd())
    #    os.chdir('..')
    debugmode = 'debug' in list(sys.argv)

    def install(name, loc):
        """if we don't have a module, download it"""
        try:
            import importlib
            importlib.import_module(name)
        except ImportError:
            import pip
            pip.main(['install', loc])

    # third-party GUI deps: tkintertable always, PIL only on Python 2
    tkintertablecode = ('tkintertable', 'git+https://github.com/interrogator/tkintertable.git')
    pilcode = ('PIL', 'http://effbot.org/media/downloads/Imaging-1.1.7.tar.gz')

    # 'noinstall' on the command line skips the auto-install step
    if not any(arg.lower() == 'noinstall' for arg in sys.argv):
        install(*tkintertablecode)
        from corpkit.constants import PYTHON_VERSION
        if PYTHON_VERSION == 2:
            install(*pilcode)

    try:
        if lc:
            corpkit_gui(loadcurrent=lc, debug=debugmode)
        else:
            corpkit_gui(debug=debugmode)
    except:
        # dump the traceback in every available format; this verbosity is
        # deliberate, to debug frozen (pyinstaller) builds
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("*** print_tb:")
        print(traceback.print_tb(exc_traceback, limit=1, file=sys.stdout))
        print("*** print_exception:")
        print(traceback.print_exception(exc_type, exc_value, exc_traceback,
                                        limit=2, file=sys.stdout))
        print("*** print_exc:")
        print(traceback.print_exc())
        print("*** format_exc, first and last line:")
        formatted_lines = traceback.format_exc()
        print(formatted_lines)
        print("*** format_exception:")
        print('\n'.join(traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback)))
        print("*** extract_tb:")
        print('\n'.join([str(i) for i in traceback.extract_tb(exc_traceback)]))
        print("*** format_tb:")
        print(traceback.format_tb(exc_traceback))
        print("*** tb_lineno:", exc_traceback.tb_lineno)
| mit |
AnasGhrab/scikit-learn | sklearn/datasets/samples_generator.py | 35 | 56035 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    # sampling domain is 2 ** dimensions, so split dimensions > 30 into
    # independently sampled chunks to keep the integer range manageable
    if dimensions > 30:
        high = _generate_hypercube(samples, dimensions - 30, rng)
        low = _generate_hypercube(samples, 30, rng)
        return np.hstack([high, low])
    ints = sample_without_replacement(2 ** dimensions, samples,
                                      random_state=rng)
    ints = astype(ints, dtype='>u4', copy=False)
    # unpack each big-endian uint32 into 32 bits, keep the low `dimensions`
    bits = np.unpackbits(ints.view('>u1')).reshape((-1, 32))
    return bits[:, -dimensions:]
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.

    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # infer the last class weight when n_classes - 1 weights were given
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # hand out any rounding leftovers one sample at a time
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=np.int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        # perturb vertices so the centroids form a random polytope instead
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator=True,
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix

    return_indicator : bool, optional (default=False),
        If ``True``, return ``Y`` in the binary indicator format, else
        return a tuple of lists of labels.

    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array or sparse CSR matrix of shape [n_samples, n_features]
        The generated samples.

    Y : tuple of lists or array of shape [n_samples, n_classes]
        The label sets.

    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    """
    generator = check_random_state(random_state)
    # random class prior p(c), normalized to sum to one
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    # random word-given-class distributions p(w|c), one column per class
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # draw one (words, labels) pair; closes over the distributions above
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y

    # accumulate word indices in CSR form (indices + indptr)
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    # collapse repeated words into counts
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    if return_indicator:
        Y = MultiLabelBinarizer().fit([range(n_classes)]).transform(Y)

    if return_distributions:
        return X, Y, p_c, p_w_c

    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rng = check_random_state(random_state)

    # ten independent standard-normal features per sample
    X = rng.normal(size=(n_samples, 10))
    # label is +1 when the squared norm exceeds the 9.34 threshold, else -1
    y = np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.randn(n_samples, n_features)
    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
                                                           n_targets)

    y = np.dot(X, ground_truth) + bias

    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        # keep coefficients aligned with the permuted feature columns
        ground_truth = ground_truth[indices]

    # drop singleton target dimension so y is 1-D when n_targets == 1
    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(ground_truth)

    else:
        return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)
    # sample one extra angle and drop it so the first point != last point
    angles = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
    outer = np.column_stack([np.cos(angles), np.sin(angles)])
    inner = outer * factor

    # outer circle first (label 0), scaled-down inner circle second (label 1)
    X = np.vstack([outer, inner])
    y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
                   np.ones(n_samples // 2, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    Read more in the :ref:`User Guide <sample_generators>`.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    # outer half circle (first n_samples_out rows), then inner half circle
    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # Bug fix: labels must follow the stacking order of X. The first
    # n_samples_out rows are the outer circle (label 0) and the remaining
    # n_samples_in rows are the inner circle (label 1). The previous code
    # swapped the two counts, mislabelling a point whenever n_samples is odd.
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std: float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)

    # int `centers` means "draw this many random centers in center_box";
    # otherwise the given locations are validated and fix n_features
    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    # scalar std is broadcast to one value per center
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.ones(len(centers)) * cluster_std

    X = []
    y = []

    n_centers = centers.shape[0]
    # split n_samples as evenly as possible, extras go to the first centers
    n_samples_per_center = [int(n_samples // n_centers)] * n_centers

    for i in range(n_samples % n_centers):
        n_samples_per_center[i] += 1

    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        X.append(centers[i] + generator.normal(scale=std,
                                               size=(n, n_features)))
        y += [i] * n

    X = np.concatenate(X)
    y = np.array(y)

    if shuffle:
        indices = np.arange(n_samples)
        generator.shuffle(indices)
        X = X[indices]
        y = y[indices]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem

    This dataset is described in Friedman [1] and Breiman [2]. The inputs
    `X` are independent features uniformly distributed on the interval
    [0, 1], and the output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only the first 5 are actually used
    to compute `y`; the remaining features are independent of `y`, so the
    number of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features. Should be at least 5.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)

    # Uniform [0, 1] inputs; only the first five columns drive the target.
    X = generator.rand(n_samples, n_features)

    signal = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) \
        + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4]
    # The noise draw happens unconditionally, so the generator state
    # advances the same way for every value of `noise`.
    y = signal + noise * generator.randn(n_samples)

    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem

    This dataset is described in Friedman [1] and Breiman [2]. The inputs
    `X` are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2]
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    X = generator.rand(n_samples, 4)
    # Map each uniform [0, 1] column onto its documented interval.
    X[:, 0] = 100 * X[:, 0]
    X[:, 1] = 520 * np.pi * X[:, 1] + 40 * np.pi
    X[:, 3] = 10 * X[:, 3] + 1

    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = (X[:, 0] ** 2 + inner ** 2) ** 0.5
    y += noise * generator.randn(n_samples)

    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem

    This dataset is described in Friedman [1] and Breiman [2]. The inputs
    `X` are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3]))
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    X = generator.rand(n_samples, 4)
    # Map each uniform [0, 1] column onto its documented interval.
    X[:, 0] = 100 * X[:, 0]
    X[:, 1] = 520 * np.pi * X[:, 1] + 40 * np.pi
    X[:, 3] = 10 * X[:, 3] + 1

    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = np.arctan(inner / X[:, 0])
    y += noise * generator.randn(n_samples)

    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:
    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random (ortho normal) vectors
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = low_rank + tail

    # Scale the columns of u by the singular values via broadcasting:
    # `u * s` equals `np.dot(u, np.diag(s))` but avoids materializing the
    # dense (n, n) diagonal matrix and the associated full matrix product.
    return np.dot(u * s, v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    n_components : int,
        number of components in the dictionary

    n_features : int
        number of features of the dataset to generate

    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample

    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).

    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).

    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # Dictionary whose columns (atoms) are normalized to unit L2 norm.
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # Sparse code: each column receives n_nonzero_coefs gaussian entries
    # at a random support.
    X = np.zeros((n_components, n_samples))
    for col in range(n_samples):
        support = generator.permutation(n_components)[:n_nonzero_coefs]
        X[support, col] = generator.randn(n_nonzero_coefs)

    # Encode the signal as a sparse combination of the dictionary atoms.
    Y = np.dot(D, X)

    return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    # Standard-normal design matrix.
    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four columns contribute to the conditional mean of y;
    # the target is then drawn with unit standard deviation around it.
    conditional_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = generator.normal(loc=conditional_mean, scale=np.ones(n_samples))

    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)

    A = rng.rand(n_dim, n_dim)
    # SVD of the symmetric PSD matrix A.T A supplies the singular vectors.
    U, s, V = linalg.svd(np.dot(A.T, A))
    # Middle factor: note the scalar 1.0 is added to *every* entry of the
    # diagonal matrix (ones off-diagonal, 1 + r_i on the diagonal).
    middle = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, middle), V)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    rng = check_random_state(random_state)

    # Sparsity is imposed on a lower-triangular (Cholesky-like) factor.
    chol = -np.eye(dim)
    factor = rng.rand(dim, dim)
    factor[factor < alpha] = 0
    active = factor > alpha
    factor[active] = (smallest_coef
                      + (largest_coef - smallest_coef)
                      * rng.rand(np.sum(active)))
    factor = np.tril(factor, k=-1)

    # Permute rows and columns identically: we don't want to have
    # asymmetries in the final SPD matrix.
    permutation = rng.permutation(dim)
    factor = factor[permutation].T[permutation]
    chol += factor

    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Rescale rows and columns so every diagonal element becomes 1.
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)

    # Roll parameter t lies in [1.5*pi, 4.5*pi] and drives both x and z.
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))

    x = t * np.cos(t)
    y = 21 * rng.rand(1, n_samples)
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    X += noise * rng.randn(3, n_samples)
    X = X.T
    t = np.squeeze(t)

    return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)

    # Curve parameter t lies in [-1.5*pi, 1.5*pi].
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)

    x = np.sin(t)
    y = 2.0 * rng.rand(1, n_samples)
    z = np.sign(t) * (np.cos(t) - 1)

    X = np.vstack((x, y, z))
    X += noise * rng.randn(3, n_samples)
    X = X.T
    t = np.squeeze(t)

    return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.

    n_features : int, optional (default=2)
        The number of features for each sample.

    n_classes : int, optional (default=3)
        The number of classes

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw from an isotropic multivariate normal centered on `mean`.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order the samples by squared distance to the distribution mean.
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # Assign quantile labels: the first `step` samples get class 0, the next
    # `step` class 1, ..., and any remainder goes to the outermost class.
    step = n_samples // n_classes
    y = np.minimum(np.arange(n_samples) // step, n_classes - 1)

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    # Permute the rows and columns of ``data`` independently, returning the
    # shuffled array together with the row/column permutations so callers
    # can realign their cluster labels.
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = rng.permutation(n_rows)
    col_idx = rng.permutation(n_cols)
    return data[row_idx][:, col_idx], row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer
        The number of biclusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    # Label i is repeated row_sizes[i] / col_sizes[i] times.
    row_labels = np.repeat(np.arange(n_clusters), row_sizes)
    col_labels = np.repeat(np.arange(n_clusters), col_sizes)

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Pass lists to np.vstack: stacking from a bare generator is deprecated
    # since NumPy 1.16 and raises an error in NumPy >= 1.24.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))

    # Label i is repeated row_sizes[i] / col_sizes[i] times.
    row_labels = np.repeat(np.arange(n_row_clusters), row_sizes)
    col_labels = np.repeat(np.arange(n_col_clusters), col_sizes)

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Pass lists to np.vstack: stacking from a bare generator is deprecated
    # since NumPy 1.16 and raises an error in NumPy >= 1.24. Each bicluster
    # (i, j) yields one row indicator and one column indicator.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
    return result, rows, cols
| bsd-3-clause |
meteoswiss-mdr/precipattractor | pyscripts/radar_statistics.py | 1 | 61766 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
from PIL import Image
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pylab
import numpy as np
import shutil
import datetime
import time
import warnings
from collections import OrderedDict
import pyfftw
from scipy import stats
import scipy.ndimage as ndimage
import pywt
from pyearth import Earth
import cv2
import getpass
usrName = getpass.getuser()
#### Import personal libraries
import time_tools_attractor as ti
import io_tools_attractor as io
import data_tools_attractor as dt
import stat_tools_attractor as st
import optical_flow as of
import maple_ree
import gis_base as gis
################
np.set_printoptions(precision=2)
noData = -999.0
fmt1 = "%.1f"
fmt2 = "%.2f"
fmt3 = "%.3f"
fmt4 = "%.4f"
fmt5 = "%.5f"
########SET DEFAULT ARGUMENTS##########
timeAccumMin = 5
resKm = 1 # To compute FFT frequency
inBaseDir = '/scratch/' + usrName + '/data/' # directory to read from
outBaseDir = '/store/msrad/radar/precip_attractor/data/'
fourierVar = 'dbz' # field on which to perform the fourier analysis ('rainrate' or 'dbz')
scalingBreakArray_KM = [12] #np.arange(6, 42, 2) # [15]
maxBeta1rangeKM = 512
minBeta2rangeKM = 4
fftDomainSize = 512
FFTmod = 'NUMPY' # 'FFTW' or 'NUMPY'
windowFunction = 'none' #'blackman' or 'none'
########GET ARGUMENTS FROM CMD LINE####
parser = argparse.ArgumentParser(description='Compute radar rainfall field statistics.')
parser.add_argument('-start', default='201601310600', type=str,help='Starting date YYYYMMDDHHmmSS.')
parser.add_argument('-end', default='201601310600', type=str,help='Ending date YYYYMMDDHHmmSS.')
parser.add_argument('-product', default='AQC', type=str,help='Which radar rainfall product to use (AQC, CPC, etc).')
parser.add_argument('-plot', default=0, type=int,help='Whether to plot the rainfall fields and the power spectra.')
parser.add_argument('-analysis', nargs='+', default=['autocorr', 'of'], type=str,help='Type of analysis to do (1d, 2d, of, autocorr, wavelets, 1dnoise, 2dnoise).')
parser.add_argument('-wols', default=0, type=int,help='Whether to use the weighted ordinary leas squares or not in the fitting of the power spectrum.')
parser.add_argument('-minR', default=0.08, type=float,help='Minimum rainfall rate for computation of WAR and various statistics.')
parser.add_argument('-format', default="netcdf", type=str,help='File format for output statistics (netcdf or csv).')
parser.add_argument('-accum', default=5, type=int,help='Accumulation time of the product [minutes].')
parser.add_argument('-temp', default=5, type=int,help='Temporal sampling of the products [minutes].')
args = parser.parse_args()
timeStartStr = args.start
timeEndStr = args.end
boolPlotting = args.plot
product = args.product
weightedOLS = args.wols
timeAccumMin = args.accum
analysis = args.analysis
if set(analysis).issubset(['1d', '2d', 'of', 'autocorr', '2d+autocorr', '1d+2d+autocorr', 'wavelets', '1dnoise', '2dnoise']) == False:
print('You have to ask for a valid analysis [1d, 2d, of, autocorr, 2d+autocorr, 1d+2d+autocorr, wavelets, 1dnoise, 2dnoise]')
sys.exit(1)
if type(scalingBreakArray_KM) != list and type(scalingBreakArray_KM) != np.ndarray:
scalingBreakArray_KM = [scalingBreakArray_KM]
if len(scalingBreakArray_KM) > 1:
variableBreak = 1
else:
variableBreak = 0
if (timeAccumMin == 60) | (timeAccumMin == 60*24):
timeSampMin = timeAccumMin
else:
timeSampMin = args.temp
if args.format == 'netcdf':
strFileFormat = '.nc'
elif args.format == 'csv':
strFileFormat = '.csv'
else:
print('File -format', args.format, ' not valid')
sys.exit(1)
if (int(args.start) > int(args.end)):
print('Time end should be after time start')
sys.exit(1)
if (int(args.start) < 198001010000) or (int(args.start) > 203001010000):
print('Invalid -start or -end time arguments.')
sys.exit(1)
else:
timeStartStr = args.start
timeEndStr = args.end
if (product == 'AQC') or (product == 'CPC'):
print('Computing statistics on ', args.product)
else:
print('Invalid -product argument.')
sys.exit(1)
if fourierVar == 'rainrate':
unitsSpectrum = r"Rainfall field power $\left[ 10\mathrm{log}_{10}\left(\frac{(mm/hr)^2}{km^2}\right)\right]$"
elif fourierVar == 'dbz':
unitsSpectrum = r"Reflectivity field power $\left[ 10\mathrm{log}_{10}\left(\frac{dBZ^2}{km^2}\right)\right]$"
###################################
# Get dattime from timestamp
timeStart = ti.timestring2datetime(timeStartStr)
timeEnd = ti.timestring2datetime(timeEndStr)
timeAccumMinStr = '%05i' % timeAccumMin
timeAccum24hStr = '%05i' % (24*60)
## COLORMAPS
color_list, clevs, clevsStr = dt.get_colorlist('MeteoSwiss') #'STEPS' or 'MeteoSwiss'
cmap = colors.ListedColormap(color_list)
norm = colors.BoundaryNorm(clevs, cmap.N)
cmap.set_over('black',1)
cmapMask = colors.ListedColormap(['black'])
# Load background DEM image
dirDEM = '/users/' + usrName + '/scripts/shapefiles'
fileNameDEM = dirDEM + '/ccs4.png'
isFile = os.path.isfile(fileNameDEM)
if (isFile == False):
print('File: ', fileNameDEM, ' not found.')
else:
print('Reading: ', fileNameDEM)
demImg = Image.open(fileNameDEM)
demImg = dt.extract_middle_domain_img(demImg, fftDomainSize, fftDomainSize)
demImg = demImg.convert('P')
# Limits of the CCS4 radar composite domain (Swiss coordinates, metres)
Xmin = 255000
Xmax = 965000
Ymin = -160000
Ymax = 480000
# Grid coordinates at resKm resolution; the +resKm*1000 makes the far edge
# inclusive in np.arange.
allXcoords = np.arange(Xmin,Xmax+resKm*1000,resKm*1000)
allYcoords = np.arange(Ymin,Ymax+resKm*1000,resKm*1000)
# Swiss country-border shapefile and the proj4 strings used to re-project it
# from WGS84 lat/lon into Swiss CH1903 (somerc) coordinates.
fileNameShapefile = dirDEM + '/CHE_adm0.shp'
proj4stringWGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84"
proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
+k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
#proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
#+k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
# Number of radar files expected per day. BUG FIX: integer division keeps this
# an int under Python 3 (the original `60/timeAccumMin` yields a float);
# assumes timeAccumMin divides 60 evenly -- TODO confirm.
nrFilesDay = 24*(60//timeAccumMin)
##### LOOP OVER FILES ##########################################################
# State carried across iterations of the main time loop below.
# Rainfall stack: the most recent `stackSize` dBZ fields (index 0 = newest).
nrValidFields = 0
stackSize = 12
rainfallStack = np.zeros((stackSize, fftDomainSize, fftDomainSize))
waveletStack = [None for _ in range(stackSize)]
# Flow stacks: sparse optical-flow vector components accumulated over a short
# sliding time window.
zStack, tStack = [], []
rowStack, colStack = [], []
uStack, vStack = [], []
## Daily accumulators, written out at the end of each day.
dailyStats, dailyU, dailyV = [], [], []
dailyTimesUV = []
dailyWavelets, dailyTimesWavelets = [], []
# NOTE(review): time.clock() was removed in Python 3.8 (use perf_counter on
# modern interpreters); kept here because the rest of the script uses it too.
tic = time.clock()
timeLocal = timeStart
while timeLocal <= timeEnd:
ticOneImg = time.clock()
# Read in radar image into object
timeLocalStr = ti.datetime2timestring(timeLocal)
r = io.read_gif_image(timeLocalStr, product='AQC', minR = args.minR, fftDomainSize = 512, \
resKm = 1, timeAccumMin = 5, inBaseDir = '/scratch/lforesti/data/', noData = -999.0, cmaptype = 'MeteoSwiss', domain = 'CCS4')
hourminStr = ti.get_HHmm_str(timeLocal.hour, timeLocal.minute) # Used to write out data also when there is no valid radar file
minWAR = 0.1
if r.war >= minWAR:
Xmin = r.extent[0]
Xmax = r.extent[1]
Ymin = r.extent[2]
Ymax = r.extent[3]
# Move older rainfall fields down the stack
for s in range(0, rainfallStack.shape[0]-1):
rainfallStack[s+1,:] = rainfallStack[s,:]
# Add last rainfall field on top
rainfallStack[0,:] = r.dBZFourier
# Increment nr of consecutive valid rainfall fields (war >= 0.01)
nrValidFields += 1
########### Compute velocity field ##############
# It will be used to estimate the Lagrangian auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
print('\t')
ticOF = time.clock()
# extract consecutive images
prvs = rainfallStack[1].copy()
next = rainfallStack[0].copy()
prvs *= 255.0/np.max(prvs)
next *= 255.0/np.max(next)
# 8-bit int
prvs = np.ndarray.astype(prvs,'uint8')
next = np.ndarray.astype(next,'uint8')
# plt.figure()
# plt.imshow(prvs)
# plt.colorbar()
# plt.show()
# remove small noise with a morphological operator (opening)
prvs = of.morphological_opening(prvs, thr=r.zerosDBZ, n=5)
next = of.morphological_opening(next, thr=r.zerosDBZ, n=5)
#+++++++++++ Optical flow parameters
maxCornersST = 500 # Number of asked corners for Shi-Tomasi
qualityLevelST = 0.05
minDistanceST = 5 # Minimum distance between the detected corners
blockSizeST = 15
winsizeLK = 100 # Small windows (e.g. 10) lead to unrealistic high speeds
nrLevelsLK = 0 # Not very sensitive parameter
kernelBandwidth = 100 # Bandwidth of kernel interpolation of vectors
maxSpeedKMHR = 100 # Maximum allowed speed
nrIQRoutlier = 3 # Nr of IQR above median to consider the vector as outlier (if < 100 km/hr)
#++++++++++++++++++++++++++++++++++++
# (1b) Shi-Tomasi good features to track
p0, nCorners = of.ShiTomasi_features_to_track(prvs, maxCornersST, qualityLevel=qualityLevelST, minDistance=minDistanceST, blockSize=blockSizeST)
print("Nr of points OF ShiTomasi =", len(p0))
# (2) Lucas-Kanade tracking
col, row, u, v, err = of.LucasKanade_features_tracking(prvs, next, p0, winSize=(winsizeLK,winsizeLK), maxLevel=nrLevelsLK)
# (3) exclude outliers
speed = np.sqrt(u**2 + v**2)
q1, q2, q3 = np.percentile(speed, [25,50,75])
maxspeed = np.min((maxSpeedKMHR/12, q2 + nrIQRoutlier*(q3 - q1)))
minspeed = np.max((0,q2 - 2*(q3 - q1)))
keep = (speed <= maxspeed) # & (speed >= minspeed)
print('Max speed =',np.max(speed)*12)
print('Median speed =',np.percentile(speed,50)*12)
print('Speed threshold =',maxspeed*12)
# Plot histogram of speeds
# plt.close()
# plt.hist(speed*12, bins=30)
# plt.title('min = %1.1f, max = %1.1f' % (minspeed*12,maxspeed*12))
# plt.axvline(x=maxspeed*12)
# plt.xlabel('Speed [km/hr]')
# plt.show()
u = u[keep].reshape(np.sum(keep),1)
v = v[keep].reshape(np.sum(keep),1)
row = row[keep].reshape(np.sum(keep),1)
col = col[keep].reshape(np.sum(keep),1)
# (4) stack vectors within time window
rowStack.append(row)
colStack.append(col)
uStack.append(u)
vStack.append(v)
# convert lists of arrays into single arrays
row = np.vstack(rowStack)
col = np.vstack(colStack)
u = np.vstack(uStack)
v = np.vstack(vStack)
if (nrValidFields >= 4):
colStack.pop(0)
rowStack.pop(0)
uStack.pop(0)
vStack.pop(0)
# (1) decluster sparse motion vectors
col, row, u, v = of.declustering(col, row, u, v, R = 20, minN = 3)
print("Nr of points OF after declustering =", len(row))
# (2) kernel interpolation
domainSize = [fftDomainSize, fftDomainSize]
colgrid, rowgrid, U, V, b = of.interpolate_sparse_vectors_kernel(col, row, u, v, domainSize, b = kernelBandwidth)
print('Kernel bandwith =',b)
# Add U,V fields to daily collection
dailyU.append(U)
dailyV.append(-V) # Reverse V orientation (South -> North)
dailyTimesUV.append(timeLocalStr)
# Compute advection
# resize motion fields by factor f (for advection)
f = 0.5
if (f<1):
Ures = cv2.resize(U, (0,0), fx=f, fy=f)
Vres = cv2.resize(V, (0,0), fx=f, fy=f)
else:
Ures = U
Vres = V
tocOF = time.clock()
# Call MAPLE routine for advection
net = 1
rainfield_lag1 = maple_ree.ree_epol_slio(rainfallStack[1], Vres, Ures, net)
# Call MAPLE routine for advection over several time stamps
# net = np.min([12, nrValidFields])
# for lag in range(2,net):
# rainfield_advected = maple_ree.ree_epol_slio(rainfallStack[2], Vres, Ures, net)
# plt.close()
# plt.subplot(121)
# plt.imshow(rainfallStack[1], vmin=8, vmax=55)
# plt.subplot(122)
# plt.imshow(rainfield_lag1[:,:,-1], vmin=8, vmax=55)
# plt.show()
# sys.exit()
# Resize vector fields for plotting
xs, ys, Us, Vs = of.reduce_field_density_for_plotting(colgrid, rowgrid, U, V, 25)
# Plot vectors to check if correct
# plt.quiver(xs, ys, Us, Vs)
# plt.show()
print('Elapsed time OF: ', tocOF - ticOF, ' seconds.')
print('\t')
########### Compute Wavelet transform ###########
if 'wavelets' in analysis:
wavelet = 'haar'
w = pywt.Wavelet(wavelet)
#print(w)
# Upscale field in rainrate
wavelet_coeff = st.wavelet_decomposition_2d(r.rainrate, wavelet, nrLevels = None)
# Transform into dBZ
for level in range(0,len(wavelet_coeff)):
wavelet_coeff[level],_,_ = dt.rainrate2reflectivity(wavelet_coeff[level])
# Generate coordinates of centers of wavelet coefficients
xvecs, yvecs = st.generate_wavelet_coordinates(wavelet_coeff, r.dBZFourier.shape, Xmin, Xmax, Ymin, Ymax, resKm*1000)
# Append a given wavelet scale to write out into daily netCDF files
scaleKm_asked = 8
scale2keep = st.get_level_from_scale(resKm, scaleKm_asked)
scaleKm = xvecs[scale2keep][1] - xvecs[scale2keep][0]
scaleKm = int(scaleKm/1000)
if scaleKm_asked != scaleKm:
print('Asked and returned wavelet scales not matching.', scaleKm_asked, 'vs', scaleKm)
sys.exit()
else:
print('Wavelet scale = ', scaleKm, 'km')
dailyWavelets.append(wavelet_coeff[scale2keep])
dailyTimesWavelets.append(timeLocalStr)
# # Write out wavelet coefficients to netCDF file
# # Keep only large scales (smaller file size)
# wavelet_coeff_image = wavelet_coeff[1:]
# analysisType = 'WAVELET'
# fileNameWavelet,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, \
# timeAccumMin=timeAccumMin, quality=0, format='netcdf')
# io.write_netcdf_waveletcoeffs(fileNameWavelet, timeLocalStr, \
# xvecs, yvecs, wavelet_coeff_image, waveletType = wavelet)
# print('Saved:', fileNameWavelet)
## Add wavelet coeffs to the stack
for s in range(0, len(waveletStack)-1):
waveletStack[s+1] = waveletStack[s]
waveletStack[0] = wavelet_coeff
# # Full wavelet decomposition to get also the HDV residual components
waveletHVD = False
nrLevels = 6
if waveletHVD:
coeffs = pywt.wavedec2(r.dBZFourier, w, level=nrLevels)
#cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
cA2 = coeffs[0]
# ###### Use wavelets to generate a field of correlated noise
waveletNoise = False
level2perturb = [3,4,5]
nrMembers = 3
if waveletNoise:
# Generate white noise at a given level
stochasticEnsemble = st.generate_wavelet_noise(r.dBZFourier, w, nrLevels, level2perturb, nrMembers)
########### Compute Fourier power spectrum ###########
ticFFT = time.clock()
minFieldSize = np.min(fftDomainSize)
# Replace zeros with the lowest rainfall threshold (to obtain better beta2 estimations)
if fourierVar == 'rainrate':
rainfieldFourier = r.rainrate
rainfieldFourier[rainfieldFourier < args.minR] = args.minR
if fourierVar == 'dbz':
rainfieldFourier = r.dBZFourier
zerosDBZ,_,_ = dt.rainrate2reflectivity(args.minR)
# Method 1: Set the zeros to the dBZ threshold
# rainfieldFourier[rainfieldFourier < zerosDBZ] = zerosDBZ
# Method 2: Remove the dBZ threshold to all data
rainfieldFourier = rainfieldFourier - zerosDBZ
# plt.imshow(rainfieldFourier)
# plt.colorbar()
# plt.show()
# Compute 2D power spectrum
psd2d, freqAll = st.compute_2d_spectrum(rainfieldFourier, resolution=resKm, window=None, FFTmod='NUMPY')
# Compute autocorrelation using inverse FFT of spectrum
if ('autocorr' in analysis) or ('1d' in analysis) or ('2d+autocorr' in analysis) or ('1d+2d+autocorr' in analysis) or ('wavelets' in analysis):
# Compute autocorrelation
autocorr,_,_,_ = st.compute_autocorrelation_fft2(rainfieldFourier, FFTmod = 'NUMPY')
# Compute anisotropy from autocorrelation function
autocorrSizeSub = 255
percentileZero = 90
autocorrSub, eccentricity_autocorr, orientation_autocorr, xbar_autocorr, ybar_autocorr, eigvals_autocorr, eigvecs_autocorr, percZero_autocorr,_ = st.compute_fft_anisotropy(autocorr, autocorrSizeSub, percentileZero, rotation=False)
if ('2d' in analysis) or ('2d+autocorr' in analysis) or ('1d+2d+autocorr' in analysis) or ('wavelets' in analysis):
cov2logPS = True # Whether to compute the anisotropy on the log of the 2d PS
# Extract central region of 2d power spectrum and compute covariance
if cov2logPS:
psd2d_anis = 10.0*np.log10(psd2d)
else:
psd2d_anis = np.copy(psd2d)
# Compute anisotropy from FFT spectrum
fftSizeSub = 40#255
percentileZero = 90
smoothing_sigma = 3
psd2dsub, eccentricity_ps, orientation_ps, xbar_ps, ybar_ps, eigvals_ps, eigvecs_ps, percZero_ps, psd2dsubSmooth = st.compute_fft_anisotropy(psd2d_anis, fftSizeSub, percentileZero, sigma = smoothing_sigma)
print(percentileZero,'- percentile = ', percZero_ps)
# Compute 1D radially averaged power spectrum
psd1d, freq, wavelengthKm = st.compute_radialAverage_spectrum(psd2d, resolution=resKm)
############ Compute spectral slopes Beta
r_beta1_best = 0
r_beta2_best = 0
for s in range(0,len(scalingBreakArray_KM)):
scalingBreak_KM = scalingBreakArray_KM[s]
largeScalesLims = np.array([maxBeta1rangeKM, scalingBreak_KM])
smallScalesLims = np.array([scalingBreak_KM, minBeta2rangeKM])
idxBeta1 = (wavelengthKm <= largeScalesLims[0]) & (wavelengthKm > largeScalesLims[1]) # large scales
idxBeta2 = (wavelengthKm <= smallScalesLims[0]) & (wavelengthKm > smallScalesLims[1]) # small scales
idxBetaBoth = (wavelengthKm <= largeScalesLims[0]) & (wavelengthKm > smallScalesLims[1]) # all scales
#print('Nr points beta1 = ', np.sum(idxBeta1))
#print('Nr points beta2 = ', np.sum(idxBeta2))
#io.write_csv('/users/' + usrName + '/results/ps_marco.csv', ['freq','psd'], np.asarray([freq,psd1d]).T.tolist())
# Compute betas using OLS
if weightedOLS == 0:
beta1, intercept_beta1, r_beta1 = st.compute_beta_sm(10*np.log10(freq[idxBeta1]),10*np.log10(psd1d[idxBeta1]))
beta2, intercept_beta2, r_beta2 = st.compute_beta_sm(10*np.log10(freq[idxBeta2]), 10*np.log10(psd1d[idxBeta2]))
elif weightedOLS == 1:
# Compute betas using weighted OLS
linWeights = len(freq[idxBeta1]) - np.arange(len(freq[idxBeta1]))
#logWeights = 10*np.log10(linWeights)
logWeights = linWeights
beta1, intercept_beta1,r_beta1 = st.compute_beta_sm(10*np.log10(freq[idxBeta1]), 10*np.log10(psd1d[idxBeta1]), logWeights)
linWeights = len(freq[idxBeta2]) - np.arange(len(freq[idxBeta2]))
#logWeights = 10*np.log10(linWeights)
logWeights = linWeights
beta2, intercept_beta2, r_beta2 = st.compute_beta_sm(10*np.log10(freq[idxBeta2]), 10*np.log10(psd1d[idxBeta2]), logWeights)
else:
print("Please set weightedOLS either to 0 or 1")
sys.exit(1)
# Select best fit based on scaling break
if np.abs(r_beta1 + r_beta2) > np.abs(r_beta1_best + r_beta2_best):
r_beta1_best = r_beta1
r_beta2_best = r_beta2
beta1_best = beta1
intercept_beta1_best = intercept_beta1
beta2_best = beta2
intercept_beta2_best = intercept_beta2
scalingBreak_best = scalingBreak_KM
smallScalesLims_best = smallScalesLims
largeScalesLims_best = largeScalesLims
scalingBreak_Idx = idxBeta2[0]
r_beta1 = r_beta1_best
r_beta2 = r_beta2_best
beta1 = beta1_best
beta2 = beta2_best
intercept_beta1 = intercept_beta1_best
intercept_beta2 = intercept_beta2_best
smallScalesLims = smallScalesLims_best
largeScalesLims = largeScalesLims_best
if variableBreak == 1:
print("Best scaling break corr. = ", scalingBreak_best, ' km')
else:
print("Fixed scaling break = ", scalingBreak_best, ' km')
#### Fitting spectral slopes with MARS (Multivariate Adaptive Regression Splines)
useMARS = False
if useMARS:
model = Earth(max_degree = 1, max_terms = 2)
model.fit(dt.to_dB(freq[idxBetaBoth]), dt.to_dB(psd1d[idxBetaBoth]))
mars_fit = model.predict(dt.to_dB(freq[idxBetaBoth]))
# plt.scatter(dt.to_dB(freq),dt.to_dB(psd1d))
# plt.plot(dt.to_dB(freq[idxBetaBoth]), mars_fit)
# plt.show()
# print(model.trace())
# print(model.summary())
# print(model.basis_)
# print(model.coef_[0])
#y_prime_hat = model.predict_deriv(dt.to_dB(freq[idxBetaBoth]), 'x6')
scalingBreak_MARS = str(model.basis_[2])[2:7]
scalingBreak_MARS_KM = 1.0/dt.from_dB(float(scalingBreak_MARS))
print("Best scaling break MARS = ", scalingBreak_MARS_KM, ' km')
tocFFT = time.clock()
#print('FFT time: ', tocFFT-ticFFT, ' seconds.')
##################### COMPUTE SUMMARY STATS #####################################
# Compute field statistics in rainfall units
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
rainmean = np.nanmean(r.rainrate.ravel())
rainstd = np.nanstd(r.rainrate.ravel())
raincondmean = np.nanmean(r.rainrateNans.ravel())
raincondstd = np.nanstd(r.rainrateNans.ravel())
# Compute field statistics in dBZ units
dBZmean = np.nanmean(r.dBZ.ravel())
dBZstd = np.nanstd(r.dBZ.ravel())
dBZcondmean = np.nanmean(r.dBZNans.ravel())
dBZcondstd = np.nanstd(r.dBZNans.ravel())
# Compute Eulerian Auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
corr_eul_lag1 = np.corrcoef(rainfallStack[0,:].flatten(), rainfallStack[1,:].flatten())
corr_eul_lag1 = corr_eul_lag1[0,1]
print("Eulerian correlation =", fmt3 % corr_eul_lag1)
# Compute Eulerian correlation at each wavelet coeff level
# corr_eul_wavelet_levels = []
# for level in range(0,len(wavelet_coeff)):
# corr_eul_level = np.corrcoef(np.array(waveletStack[0][level]).flatten(), np.array(waveletStack[1][level]).flatten())
# corr_eul_level = corr_eul_level[0,1]
# corr_eul_wavelet_levels.append(corr_eul_level)
# print(corr_eul_wavelet_levels)
# plt.figure()
# plt.scatter(rainfallStack[0,:], rainfallStack[1,:])
# plt.show()
else:
corr_eul_lag1 = np.nan
# Compute Lagrangian auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
corr_lagr_lag1 = np.corrcoef(rainfield_lag1.flatten(), rainfallStack[0,:].flatten())
corr_lagr_lag1 = corr_lagr_lag1[0,1]
print("Lagrangian correlation =", fmt3 % corr_lagr_lag1)
print("Diff. Lagr-Eul correlation =", fmt3 % (corr_lagr_lag1 - corr_eul_lag1))
# plt.figure()
# plt.scatter(rainfallStack[0,:], rainfallStack[1,:])
# plt.show()
corr_lagr_lags = []
for lag in range(1,net):
corr_lagr = np.corrcoef(rainfield_advected[lag].flatten(), rainfallStack[0,:].flatten())
corr_lagr_lags.append(corr_lagr[0,1])
print('Lagrangian correlation lags =', corr_lagr_lags)
else:
corr_lagr_lag1 = np.nan
################### COLLECT DAILY STATS
timeStampStr = ti.datetime2timestring(timeLocal)
# Headers
headers = ['time', 'alb', 'doe', 'mle', 'ppm', 'wei', 'war', 'r_mean', 'r_std', 'r_cmean', 'r_cstd',
'dBZ_mean', 'dBZ_std', 'dBZ_cmean', 'dBZ_cstd',
'beta1', 'corr_beta1', 'beta2', 'corr_beta2' , 'scaling_break', 'eccentricity', 'orientation',
'corr_eul_lag1', 'corr_lagr_lag1']
if '2d' in analysis:
eccentricity = eccentricity_ps
orientation = orientation_ps
else:
eccentricity = eccentricity_autocorr
orientation = orientation_autocorr
# Data
instantStats = [timeStampStr,
str(r.alb),
str(r.dol),
str(r.lem),
str(r.ppm),
str(r.wei),
fmt4 % r.war,
fmt5 % rainmean,
fmt5 % rainstd,
fmt5 % raincondmean,
fmt5 % raincondstd,
fmt4 % dBZmean,
fmt4 % dBZstd,
fmt4 % dBZcondmean,
fmt4 % dBZcondstd,
fmt4 % beta1,
fmt4 % r_beta1,
fmt4 % beta2,
fmt4 % r_beta2,
int(scalingBreak_best),
fmt4 % eccentricity,
fmt4 % orientation,
fmt4 % corr_eul_lag1,
fmt4 % corr_lagr_lag1
]
print('+++++++ Radar statistics +++++++')
outputPrint = OrderedDict(zip(headers, instantStats))
print(outputPrint)
print('++++++++++++++++++++++++++++++++')
# Append statistics to daily array
dailyStats.append(instantStats)
######################## PLOT WAVELETS ######################
if 'wavelets' in analysis and boolPlotting:
if waveletNoise:
nrRows,nrCols = dt.optimal_size_subplot(nrMembers+1)
# Adjust figure parameters
ratioFig = nrCols/nrRows
figWidth = 14
colorbar = 'off'
fig = plt.figure(figsize=(ratioFig*figWidth,figWidth))
padding = 0.01
plt.subplots_adjust(hspace=0.05, wspace=0.01)
mpl.rcParams['image.interpolation'] = 'nearest'
# Plot rainfield
plt.subplot(nrRows, nrCols, 1)
PC = plt.imshow(r.dBZFourier, vmin=15, vmax=45)
plt.title('Rainfield [dBZ]',fontsize=15)
plt.axis('off')
# Plot stochastic ensemble
for member in range(0, nrMembers):
plt.subplot(nrRows, nrCols, member+2)
plt.imshow(stochasticEnsemble[member],vmin=15, vmax=45)
plt.title('Member '+ str(member+1), fontsize=15)
plt.axis('off')
plt.suptitle('Stochastic ensemble based on wavelet type: ' + wavelet + '\n by perturbing levels ' + str(level2perturb), fontsize=20)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr + r.hourminStr + '-' + wavelet + '-waveletEnsemble_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
plt.close()
if waveletHVD:
# Plot of all the horizontal, diagonal and vertical components of the wavelet transform
pltWavelets = ['H','V','D']
nrPlots = (len(coeffs)-1)*len(pltWavelets)+2
mpl.rcParams['image.interpolation'] = 'none'
nrRows,nrCols = dt.optimal_size_subplot(nrPlots)
print('Nr. plots = ' + str(nrPlots), ' in ', str(nrRows), 'x', str(nrCols))
# Adjust figure parameters
ratioFig = nrCols/nrRows
figWidth = 14
colorbar = 'off'
fig = plt.figure(figsize=(ratioFig*figWidth,figWidth))
padding = 0.01
plt.subplots_adjust(hspace=0.05, wspace=0.01)
###
# Plot rainfield
ax1 = plt.subplot(nrRows, nrCols, 1)
PC = plt.imshow(r.dBZFourier, vmin=15, vmax=45)
plt.title('Rainfield [dBZ]')
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
nplot = 2
for level in range(1,nrLevels+1):
for p in range(0,len(pltWavelets)):
waveletLevel = nrLevels+1 - level
# Plot wavelet coefficients for horizontal/vertical/diagonal components
var = coeffs[waveletLevel][p]
minimum = np.percentile(var, 1)
maximum = np.percentile(var, 99)
ax1 = plt.subplot(nrRows, nrCols, nplot)
PC = plt.imshow(var, vmin=minimum, vmax=maximum, aspect=var.shape[1]/var.shape[0])
if p == 0:
titleStr = 'Level ' + str(level) + ' - horizontal'
if p == 1:
titleStr = 'Level ' + str(level) + ' - vertical'
if p == 2:
titleStr = 'Level ' + str(level) + ' - diagonal'
plt.title(titleStr)
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
nplot = nplot + 1
# Plot approximation at last scale
minimum = np.percentile(cA2, 1)
maximum = np.percentile(cA2, 99)
ax1 = plt.subplot(nrRows, nrCols, nplot)
PC = plt.imshow(cA2, aspect=cA2.shape[1]/cA2.shape[0])
plt.title('Approximation')
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
plt.suptitle('Wavelet type: ' + wavelet, fontsize=20)
#plt.show()
waveletDirs = "".join(pltWavelets)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr \
+ r.hourminStr + '-' + wavelet + '-wavelet_' + waveletDirs + '_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
###### Plots of the wavelet approximation at each scale
nrPlots = len(wavelet_coeff)
nrRows,nrCols = dt.optimal_size_subplot(nrPlots)
fig = plt.figure()
ax = fig.add_axes()
ax = fig.add_subplot(111)
for scale in range(1,nrPlots+1):
plt.subplot(nrRows, nrCols, scale)
im = plt.imshow(wavelet_coeff[scale-1], vmin=r.dbzThreshold, vmax=50, interpolation='nearest')
if scale == nrPlots:
scaleKm_l = (xvecs[scale-2][1] - xvecs[scale-2][0])*2
else:
scaleKm_l = xvecs[scale-1][1] - xvecs[scale-1][0]
scaleKm_l = int(scaleKm/1000)
titleStr = 'Scale = ' + str(scaleKm_l) + ' km'
plt.title(titleStr, fontsize=12)
plt.axis('off')
fig.tight_layout()
fig.subplots_adjust(top=0.92, right=0.8)
cbar_ax = fig.add_axes([0.90, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.suptitle('Low pass wavelet decomposition', fontsize=15)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr \
+ r.hourminStr + '-' + wavelet + '-waveletApprox_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
################ PLOTTING RAINFIELD #################################
# ++++++++++++
if boolPlotting:
titlesSize = 20
labelsSize = 18
ticksSize = 16
unitsSize=14
colorbarTicksSize=14
mpl.rcParams['xtick.labelsize'] = ticksSize
mpl.rcParams['ytick.labelsize'] = ticksSize
plt.close("all")
analysisFFT = []
for i in range(0,len(analysis)):
if (analysis[i] == '1d') or (analysis[i] == '2d') or (analysis[i] == 'autocorr') or (analysis[i] == '1d+2d+autocorr') or (analysis[i] == '2dnoise') or (analysis[i] == '2d+autocorr'):
analysisFFT.append(analysis[i])
# Loop over different analyses (1d, 2d autocorr)
for an in analysisFFT:
if an == '1d+2d+autocorr':
fig = plt.figure(figsize=(18,18))
elif an == '2d+autocorr':
fig = plt.figure(figsize=(8.3,20))
else:
fig = plt.figure(figsize=(16,7.5))
ax = fig.add_axes()
ax = fig.add_subplot(111)
if an == '1d+2d+autocorr':
rainAx = plt.subplot(221)
elif an == '2d+autocorr':
rainAx = plt.subplot(311)
else:
rainAx = plt.subplot(121)
# Draw DEM
rainAx.imshow(demImg, extent = r.extent, vmin=100, vmax=3000, cmap = plt.get_cmap('gray'))
# Draw rainfield
rainIm = rainAx.imshow(r.rainrateNans, extent = r.extent, cmap=cmap, norm=norm, interpolation='nearest')
# Draw shapefile
gis.read_plot_shapefile(fileNameShapefile, proj4stringWGS84, proj4stringCH, ax=rainAx, linewidth = 0.75)
if (nrValidFields >= 2) and ('of' in analysis):
ycoord_flipped = fftDomainSize-1-ys
plt.quiver(Xmin+xs*1000, Ymin+ycoord_flipped*1000, Us, -Vs, angles = 'xy', scale_units='xy')
#plt.quiver(Xmin+x*1000, Ymin+ycoord_flipped*1000, u, -v, angles = 'xy', scale_units='xy')
# Colorbar
cbar = plt.colorbar(rainIm, ticks=clevs, spacing='uniform', norm=norm, extend='max', fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_ticklabels(clevsStr, update_ticks=True)
if (timeAccumMin == 1440):
cbar.ax.set_title(" mm/day",fontsize=unitsSize)
elif (timeAccumMin == 60):
cbar.ax.set_title(" mm/h",fontsize=unitsSize)
elif (timeAccumMin == 5):
if an == '2d+autocorr':
cbar.set_label(r"mm h$^{-1}$",fontsize=unitsSize)
else:
cbar.ax.set_title(r" mm hr$^{-1}$",fontsize=unitsSize)
else:
print('Accum. units not defined.')
#cbar.ax.xaxis.set_label_position('top')
# # Set ticks for dBZ on the other side
# ax2 =plt.twinx(ax=cbar.ax)
# dBZlimits,_,_ = dt.rainrate2reflectivity(clevs,A,b)
# dBZlimits = np.round(dBZlimits)
# ax2.set_ylim(-10, 10)
# ax2.set_yticklabels(dBZlimits)
titleStr = timeLocal.strftime("%Y.%m.%d %H:%M") + ', ' + product + ' rainfall field, Q' + str(r.dataQuality)
titleStr = 'Radar rainfall field on ' + timeLocal.strftime("%Y.%m.%d %H:%M")
plt.title(titleStr, fontsize=titlesSize)
# Draw radar composite mask
rainAx.imshow(r.mask, cmap=r.cmapMask, extent = r.extent, alpha = 0.5)
# Add product quality within image
dataQualityTxt = "Quality = " + str(r.dataQuality)
if (an == 'of'):
plt.text(-0.15,-0.12, "Eulerian correlation = " + fmt3 % corr_eul_lag1, transform=rainAx.transAxes)
plt.text(-0.15,-0.15, "Lagrangian correlation = " + fmt3 % corr_lagr_lag1, transform=rainAx.transAxes)
diffPercEulLagr = (corr_lagr_lag1 - corr_eul_lag1)*100
plt.text(-0.15,-0.18, "Difference Lagr/Eul = " + fmt2 % diffPercEulLagr + ' %', transform=rainAx.transAxes)
# Set X and Y ticks for coordinates
xticks = np.arange(400, 900, 100)
yticks = np.arange(0, 500 ,100)
plt.xticks(xticks*1000, xticks)
plt.yticks(yticks*1000, yticks)
plt.xlabel('Swiss Easting [km]', fontsize=labelsSize)
plt.ylabel('Swiss Northing [km]', fontsize=labelsSize)
#################### PLOT SPECTRA ###########################################################
#++++++++++++ Draw 2d power spectrum
if (an == '2d') | (an == '2dnoise') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
psAx2 = plt.subplot(222)
elif an == '2d+autocorr':
psAx2 = plt.subplot(312)
else:
psAx2 = plt.subplot(122)
if fourierVar == 'rainrate':
psLims =[-50,40]
if fourierVar == 'dbz':
psLims = [-20,70]
extentFFT = (-minFieldSize/2,minFieldSize/2,-minFieldSize/2,minFieldSize/2)
if (an == '2d') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
# Smooth 2d PS for plotting contours
if cov2logPS == False:
psd2dsubSmooth = 10.0*np.log10(psd2dsubSmooth)
# Plot image of 2d PS
#psAx2.invert_yaxis()
clevsPS = np.arange(-5,70,5)
cmapPS = plt.get_cmap('nipy_spectral', clevsPS.shape[0]) #nipy_spectral, gist_ncar
normPS = colors.BoundaryNorm(clevsPS, cmapPS.N-1)
cmapPS.set_over('white',1)
# Compute alpha transparency vector
#cmapPS._init()
#cmapPS._lut[clevsPS <= percZero,-1] = 0.5
if cov2logPS:
imPS = psAx2.imshow(psd2dsub, interpolation='nearest', cmap=cmapPS, norm=normPS)
else:
imPS = psAx2.imshow(10.0*np.log10(psd2dsub), interpolation='nearest', cmap=cmapPS, norm=normPS)
# Plot smooth contour of 2d PS
# percentiles = [70,80,90,95,98,99,99.5]
# levelsPS = np.array(st.percentiles(psd2dsubSmooth, percentiles))
# print("Contour levels quantiles: ",percentiles)
# print("Contour levels 2d PS : ", levelsPS)
# if np.sum(levelsPS) != 0:
# im1 = psAx2.contour(psd2dsubSmooth, levelsPS, colors='black', alpha=0.25)
# im1 = psAx2.contour(psd2dsubSmooth, [percZero], colors='black', linestyles='dashed')
# Plot major and minor axis of anisotropy
#st.plot_bars(xbar_ps, ybar_ps, eigvals_ps, eigvecs_ps, psAx2, 'red')
#plt.text(0.05, 0.95, 'eccentricity = ' + str(fmt2 % eccentricity_ps), transform=psAx2.transAxes, backgroundcolor = 'w', fontsize=14)
#plt.text(0.05, 0.90, 'orientation = ' + str(fmt2 % orientation_ps) + '$^\circ$', transform=psAx2.transAxes,backgroundcolor = 'w', fontsize=14)
# Create ticks in km
ticks_loc = np.arange(0,2*fftSizeSub,1)
# List of ticks for X and Y (reference from top)
ticksListX = np.hstack((np.flipud(-resKm/freq[1:fftSizeSub+1]),0,resKm/freq[1:fftSizeSub])).astype(int)
ticksListY = np.flipud(ticksListX)
# List of indices where to display the ticks
if fftSizeSub <= 20:
idxTicksX = np.hstack((np.arange(0,fftSizeSub-1,2),fftSizeSub-1,fftSizeSub+1,np.arange(fftSizeSub+2,2*fftSizeSub,2))).astype(int)
idxTicksY = np.hstack((np.arange(1,fftSizeSub-2,2),fftSizeSub-2,fftSizeSub,np.arange(fftSizeSub+1,2*fftSizeSub,2))).astype(int)
else:
idxTicksX = np.hstack((np.arange(1,fftSizeSub-2,4),fftSizeSub-1,fftSizeSub+1,np.arange(fftSizeSub+3,2*fftSizeSub,4))).astype(int)
idxTicksY = np.hstack((np.arange(0,fftSizeSub-3,4),fftSizeSub-2,fftSizeSub,np.arange(fftSizeSub+2,2*fftSizeSub,4))).astype(int)
plt.xticks(rotation=90)
psAx2.set_xticks(ticks_loc[idxTicksX])
psAx2.set_xticklabels(ticksListX[idxTicksX], fontsize=13)
psAx2.set_yticks(ticks_loc[idxTicksY])
psAx2.set_yticklabels(ticksListY[idxTicksY], fontsize=13)
plt.xlabel('Wavelength [km]', fontsize=labelsSize)
plt.ylabel('Wavelength [km]', fontsize=labelsSize)
#plt.gca().invert_yaxis()
else:
#plt.contourf(10*np.log10(psd2dnoise), 20, vmin=-15, vmax=0)
imPS = plt.imshow(10*np.log10(psd2dnoise), extent=(extentFFT[0], extentFFT[1], extentFFT[2], extentFFT[3]), vmin=-15, vmax=0)
plt.gca().invert_yaxis()
cbar = plt.colorbar(imPS, ticks=clevsPS, spacing='uniform', norm=normPS, extend='max', fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_label(unitsSpectrum, fontsize=unitsSize)
#cbar.ax.set_title(unitsSpectrum, fontsize=unitsSize)
titleStr = '2D power spectrum (rotated by 90$^\circ$)'
plt.title(titleStr, fontsize=titlesSize)
#++++++++++++ Draw autocorrelation function
if (an == 'autocorr') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
autocorrAx = plt.subplot(223)
elif an == '2d+autocorr':
autocorrAx = plt.subplot(313)
else:
autocorrAx = plt.subplot(122)
maxAutocov = np.max(autocorrSub)
if maxAutocov > 50:
clevsPS = np.arange(0,200,10)
elif maxAutocov > 10:
clevsPS = np.arange(0,50,5)
else:
clevsPS = np.arange(-0.05,1.05,0.05)
clevsPSticks = np.arange(-0.1,1.1,0.1)
cmapPS = plt.get_cmap('nipy_spectral', clevsPS.shape[0]) #nipy_spectral, gist_ncar
normPS = colors.BoundaryNorm(clevsPS, cmapPS.N)
cmaplist = [cmapPS(i) for i in range(cmapPS.N)]
# force the first color entry to be white
#cmaplist[0] = (1,1,1,1.0)
# Create the new map
cmapPS = cmapPS.from_list('Custom cmap', cmaplist, cmapPS.N)
cmapPS.set_under('white',1)
ext = (-autocorrSizeSub, autocorrSizeSub, -autocorrSizeSub, autocorrSizeSub)
imAC = autocorrAx.imshow(autocorrSub, cmap=cmapPS, norm=normPS, extent = ext)
#cbar = plt.colorbar(imAC, ticks=clevsPS, spacing='uniform', norm=normPS, extend='max', fraction=0.03)
cbar = plt.colorbar(imAC, ticks=clevsPSticks, spacing='uniform', extend='min', norm=normPS,fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_label('correlation coefficient', fontsize=unitsSize)
im1 = autocorrAx.contour(np.flipud(autocorrSub), clevsPS, colors='black', alpha = 0.25, extent = ext)
im1 = autocorrAx.contour(np.flipud(autocorrSub), [percZero_autocorr], colors='black', linestyles='dashed', extent = ext)
# Plot major and minor axis of anisotropy
xbar_autocorr = xbar_autocorr - autocorrSizeSub
ybar_autocorr = ybar_autocorr - autocorrSizeSub
# Reverse sign of second dimension for plotting
eigvecs_autocorr[1,:] = -eigvecs_autocorr[1,:]
st.plot_bars(xbar_autocorr, ybar_autocorr, eigvals_autocorr, eigvecs_autocorr, autocorrAx, 'red')
# autocorrAx.invert_yaxis()
# autocorrAx.axis('image')
if an == '2d+autocorr':
xoffset = 0.05
yoffset = 0.93
yspace = 0.04
eccFontSize = 12
else:
xoffset = 0.05
yoffset = 0.95
yspace = 0.05
eccFontSize = 14
plt.text(xoffset, yoffset, 'eccentricity = ' + str(fmt2 % eccentricity_autocorr), transform=autocorrAx.transAxes, backgroundcolor = 'w', fontsize=eccFontSize)
plt.text(xoffset, yoffset-yspace, 'orientation = ' + str(fmt2 % orientation_autocorr) + '$^\circ$', transform=autocorrAx.transAxes,backgroundcolor = 'w', fontsize=eccFontSize)
plt.xticks(rotation=90)
autocorrAx.set_xlabel('Spatial lag [km]', fontsize=labelsSize)
autocorrAx.set_ylabel('Spatial lag [km]', fontsize=labelsSize)
titleStr = str(timeLocal) + ', 2D autocorrelation function (ifft(spectrum))'
titleStr = '2D autocorrelation function'
autocorrAx.set_title(titleStr, fontsize=titlesSize)
#++++++++++++ Draw 1D power spectrum
if (an == '1d') | (an == '1dnoise') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
psAx = plt.subplot(224)
else:
psAx = plt.subplot(122)
freqLimBeta1 = np.array([resKm/float(largeScalesLims[0]),resKm/float(largeScalesLims[1])])
psdLimBeta1 = intercept_beta1+beta1*10*np.log10(freqLimBeta1)
plt.plot(10*np.log10(freqLimBeta1), psdLimBeta1,'b--')
freqLimBeta2 = np.array([resKm/float(smallScalesLims[0]),resKm/float(smallScalesLims[1])])
psdLimBeta2 = intercept_beta2+beta2*10*np.log10(freqLimBeta2)
plt.plot(10*np.log10(freqLimBeta2), psdLimBeta2,'r--')
# Draw turning point
plt.vlines(x=10*np.log10(1.0/scalingBreak_best), ymin=psdLimBeta2[0]-5, ymax = psdLimBeta2[0]+5, linewidth=0.5, color='grey')
# Write betas and correlations
startX = 0.67
startY = 0.95
offsetY = 0.04
if weightedOLS == 0:
txt = "Ordinary least squares"
if weightedOLS == 1:
txt = "Weighted ordinary least squares"
# psAx.text(startX,startY, txt, color='k', transform=psAx.transAxes)
txt = r'$\beta_1$ = ' + (fmt2 % beta1) + ", r = " + (fmt3 % r_beta1)
psAx.text(startX,startY-offsetY, txt, color='b', transform=psAx.transAxes)
txt = r'$\beta_2$ = ' + (fmt2 % beta2) + ", r = " + (fmt3 % r_beta2)
psAx.text(startX,startY-2*offsetY, txt, color='r', transform=psAx.transAxes)
txt = 'WAR = ' + (fmt1 % r.war) + ' %'
psAx.text(startX,startY-3*offsetY, txt, transform=psAx.transAxes)
txt = 'MM = ' + (fmt3 %raincondmean) + ' mm/hr'
psAx.text(startX,startY-4*offsetY, txt, transform=psAx.transAxes)
# if (args.minR < 0.01):
# txt = 'Rmin = ' + (fmt3 % args.minR) + ' mm/hr'
# else:
# txt = 'Rmin = ' + (fmt2 % args.minR) + ' mm/hr'
# psAx.text(startX,startY-5*offsetY, txt, transform=psAx.transAxes)
# txt = 'Scaling break = ' + str(scalingBreak_best) + ' km'
# psAx.text(startX,startY-6*offsetY, txt, transform=psAx.transAxes)
# txt = 'Zeros = ' + (fmt1 % zerosDBZ) + ' dBZ - ' + (fmt2 % args.minR) + ' mm/hr'
# psAx.text(startX,startY-7*offsetY, txt, transform=psAx.transAxes, fontsize=10)
if an == '1dnoise':
# Draw 1d noise spectrum
plt.plot(10*np.log10(freq),10*np.log10(psd1dnoise),'k')
else:
# Draw Power spectrum
#print(10*np.log10(freq))
plt.plot(10*np.log10(freq),10*np.log10(psd1d),'k')
titleStr = 'Radially averaged power spectrum'
plt.title(titleStr, fontsize=titlesSize)
plt.xlabel("Wavelength [km]", fontsize=15)
plt.ylabel(unitsSpectrum, fontsize= 15)
if fourierVar == 'rainrate':
plt.ylim([-50.0,40.0])
if fourierVar == 'dbz':
plt.ylim([-20.0,70.0])
# Create ticks in km
ticksList = []
tickLocal = minFieldSize
for i in range(0,20):
ticksList.append(tickLocal)
tickLocal = tickLocal/2
if tickLocal < resKm:
break
ticks = np.array(ticksList, dtype=int)
ticks_loc = 10.0*np.log10(1.0/ticks)
psAx.set_xticks(ticks_loc)
psAx.set_xticklabels(ticks)
# if (an == '1d+2d+autocorr'):
# psAx.set_aspect('equal')
#plt.gcf().subplots_adjust(bottom=0.15, left=0.20)
if (an == '1d+2d+autocorr'):
plt.subplots_adjust(hspace=0.2, wspace=0.35)
else:
fig.tight_layout()
########### SAVE AND COPY PLOTS
# Save plot in scratch
analysisType = an + 'PS'
stringFigName, inDir,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal,\
product, timeAccumMin=timeAccumMin, quality=0, minR=args.minR, wols=weightedOLS, format='png')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.savefig(stringFigName,dpi=300)
print(stringFigName, ' saved.')
# Copy plot to /store
stringFigNameOut, outDir,_ = io.get_filename_stats(outBaseDir, analysisType, timeLocal, product, timeAccumMin=timeAccumMin, \
quality=0, minR=args.minR, wols=weightedOLS, format='png')
cmd = 'mkdir -p ' + outDir
os.system(cmd)
shutil.copy(stringFigName, stringFigNameOut)
print('Copied: ', stringFigName, ' to ', stringFigNameOut)
else:
nrValidFields = 0 # Reset to 0 the number of valid fields with consecutive rainfall
print('Not enough rain to compute statistics')
############ WRITE OUT DAILY STATS ###########################
print('------------------')
print('Nr valid samples during day: ', len(dailyStats))
minNrDailySamples = 2
try:
conditionForWriting = (len(dailyStats) >= minNrDailySamples) and ((hourminStr == '0000') or (timeLocal == timeEnd))
except:
print(dir(r))
sys.exit(1)
if conditionForWriting:
# List to numpy array
dailyStats = np.array(dailyStats)
# Write stats in the directory of previous day if last time stamp (midnight of next day)
timePreviousDay = timeLocal - datetime.timedelta(days = 1)
# Generate filenames
analysisType = 'STATS'
if hourminStr == '0000':
fileNameStats,_,_ = io.get_filename_stats(inBaseDir, analysisType, timePreviousDay, product, timeAccumMin=timeAccumMin,\
quality=0, minR=args.minR, wols=weightedOLS, variableBreak = variableBreak, format=args.format)
else:
fileNameStats,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, timeAccumMin=timeAccumMin,\
quality=0, minR=args.minR, wols=weightedOLS, variableBreak = variableBreak, format=args.format)
# Write out files
spectralSlopeLims = [largeScalesLims_best[0], largeScalesLims_best[1], smallScalesLims_best[1]]
if (boolPlotting == False):
if args.format == 'csv':
# Write out CSV file
io.write_csv_globalstats(fileNameStats, headers, dailyStats)
elif args.format == 'netcdf':
# Write out NETCDF file
io.write_netcdf_globalstats(fileNameStats, headers, dailyStats, str(args.minR), str(weightedOLS), spectralSlopeLims)
print(fileNameStats, ' saved.')
#### Print out some average daily stats
eulerian_corr_vector = np.array(dt.get_column_list(dailyStats,22)).astype(float)
lagrangian_corr_vector = np.array(dt.get_column_list(dailyStats,23)).astype(float)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
print('Daily average Eulerian correlation =',np.nanmean(eulerian_corr_vector))
print('Daily average Lagrangian correlation =',np.nanmean(lagrangian_corr_vector))
print('Daily difference Eul-Lagr correlation =',100*(np.nanmean(lagrangian_corr_vector) - np.nanmean(eulerian_corr_vector)),'%')
#### Write out wavelet decomposed rainfall arrays
if 'wavelets' in analysis:
# Write out wavelet coefficients to netCDF file
analysisType = 'WAVELET'
if hourminStr == '0000':
fileNameWavelet,_,_ = io.get_filename_wavelets(inBaseDir, analysisType, timePreviousDay, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
else:
fileNameWavelet,_,_ = io.get_filename_wavelets(inBaseDir, analysisType, timeLocal, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
#timePreviousDayStr = ti.datetime2timestring(timePreviousDay)
# Write out netCDF file
io.write_netcdf_waveletscale(fileNameWavelet, dailyTimesWavelets, \
xvecs[scale2keep], yvecs[scale2keep], dailyWavelets, scaleKm, waveletType = wavelet)
print('Saved:', fileNameWavelet)
# Copy wavelet netCDFs to /store
outFileNameWavelet,outDir,_ = io.get_filename_wavelets(outBaseDir, analysisType, timePreviousDay, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
cmd = 'mkdir -p ' + outDir
os.system(cmd)
shutil.copy(fileNameWavelet, outFileNameWavelet)
print('Copied: ', fileNameWavelet, ' to ', outFileNameWavelet)
#### Reset dailyStats array
dailyStats = []
dailyWavelets = []
dailyTimesWavelets = []
############ WRITE OUT DAILY VELOCITY FIELDS ###########################
if conditionForWriting and ('of' in analysis):
analysisType = 'VELOCITY'
fileNameFlow,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, \
timeAccumMin=timeAccumMin, quality=0, format='netcdf')
xvec = Xmin + colgrid*1000
yvec = Ymax - rowgrid*1000 # turn Y vector to start from highest value on top
io.write_netcdf_flow(fileNameFlow, dailyTimesUV, xvec, yvec, dailyU, dailyV)
print(fileNameFlow, 'saved.')
#### Reset daily U,V arrays
dailyU = []
dailyV = []
dailyTimesUV = []
####### UPDATE TIME STAMPS
# Add 5 minutes (or one hour if working with longer accumulations)
timeLocal = timeLocal + datetime.timedelta(minutes = timeSampMin)
tocOneImg = time.clock()
#print('Elapsed time: ', tocOneImg - ticOneImg)
toc = time.clock()
print('Total archive elapsed time: ', toc-tic, ' seconds.')
| gpl-3.0 |
ContinuumIO/numpy | numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
    """
    Fills fields from output with fields from input,
    with support for nested structures.

    Parameters
    ----------
    input : ndarray
        Input array.
    output : ndarray
        Output array.

    Notes
    -----
    * `output` should be at least the same size as `input`

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    >>> b = np.zeros((3,), dtype=a.dtype)
    >>> rfn.recursive_fill_fields(a, b)
    array([(1, 10.0), (2, 20.0), (0, 0.0)],
          dtype=[('A', '<i4'), ('B', '<f8')])

    """
    for field in output.dtype.names:
        try:
            src = input[field]
        except ValueError:
            # `input` has no field of that name: leave the output field as-is.
            continue
        if src.dtype.names:
            # Nested structure: descend and fill the sub-fields in place.
            recursive_fill_fields(src, output[field])
        else:
            count = len(src)
            output[field][:count] = src
    return output
def get_names(adtype):
    """
    Returns the field names of the input datatype as a tuple.

    Nested structured fields are reported as ``(name, subnames)`` pairs.

    Parameters
    ----------
    adtype : dtype
        Input datatype

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
    ('A', 'B')
    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
    >>> rfn.get_names(adtype)
    ('a', ('b', ('ba', 'bb')))
    """
    collected = []
    for name in adtype.names:
        field = adtype[name]
        # Composite fields contribute a (name, nested-names) pair.
        entry = (name, tuple(get_names(field))) if field.names else name
        collected.append(entry)
    return tuple(collected) or None
def get_names_flat(adtype):
    """
    Returns the field names of the input datatype as a tuple. Nested
    structures are flattened beforehand.

    Parameters
    ----------
    adtype : dtype
        Input datatype

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
    ('A', 'B')
    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
    >>> rfn.get_names_flat(adtype)
    ('a', 'b', 'ba', 'bb')
    """
    flat = []
    for name in adtype.names:
        # A composite field contributes its own name AND its sub-names.
        flat.append(name)
        field = adtype[name]
        if field.names:
            flat.extend(get_names_flat(field))
    return tuple(flat) or None
def flatten_descr(ndtype):
    """
    Flatten a structured data-type description.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
    >>> rfn.flatten_descr(ndtype)
    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))

    """
    if ndtype.names is None:
        # Unstructured dtype: its description is already flat.
        # NOTE: this branch returns a list, not a tuple, matching
        # the historical behavior of `dtype.descr`.
        return ndtype.descr
    flat = []
    for fieldname in ndtype.names:
        fieldtype = ndtype.fields[fieldname][0]
        if fieldtype.names:
            # Recurse into nested structures, discarding the parent name.
            flat.extend(flatten_descr(fieldtype))
        else:
            flat.append((fieldname, fieldtype))
    return tuple(flat)
def zip_descr(seqarrays, flatten=False):
    """
    Combine the dtype description of a series of arrays.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays
    flatten : {boolean}, optional
        Whether to collapse nested descriptions.
    """
    combined = []
    if flatten:
        for arr in seqarrays:
            combined.extend(flatten_descr(arr.dtype))
    else:
        for arr in seqarrays:
            dt = arr.dtype
            nfields = len(dt.names or ())
            if nfields > 1:
                # Multi-field arrays are kept as one nested anonymous field.
                combined.append(('', dt.descr))
            else:
                combined.extend(dt.descr)
    # Round-trip through np.dtype to normalize names ('' -> 'f0', 'f1', ...).
    return np.dtype(combined).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
    """
    Returns a dictionary with fields indexing lists of their parent fields.

    This function is used to simplify access to fields nested in other fields.

    Parameters
    ----------
    adtype : np.dtype
        Input datatype
    lastname : optional
        Last processed field name (used internally during recursion).
    parents : dictionary
        Dictionary of parent fields (used internally during recursion).

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('A', int),
    ...                    ('B', [('BA', int),
    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
    >>> rfn.get_fieldstructure(ndtype)
    ... # XXX: possible regression, order of BBA and BBB is swapped
    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}

    """
    if parents is None:
        parents = {}
    for name in adtype.names:
        field = adtype[name]
        if field.names:
            # Composite field: record its own parent (if any) and recurse.
            parents[name] = [lastname] if lastname else []
            parents.update(get_fieldstructure(field, name, parents))
        else:
            # Leaf field: its ancestry is the parent's ancestry + the parent.
            ancestry = list(parents.get(lastname, []) or [])
            if ancestry:
                ancestry.append(lastname)
            elif lastname:
                ancestry = [lastname]
            parents[name] = ancestry or []
    return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
    """
    Returns an iterator of concatenated fields from a sequence of arrays.

    Unlike `_izip_fields_flat`, generic iterables are recursed into, so the
    nesting of the inputs is walked element by element.
    """
    for element in iterable:
        if (hasattr(element, '__iter__') and
                not isinstance(element, basestring)):
            # Generic iterable (but not a string): recurse into its items.
            for f in _izip_fields(element):
                yield f
        elif isinstance(element, np.void) and len(tuple(element)) == 1:
            # Structured scalar holding a single field: unpack it.
            # (np.void does not define __iter__ on the Python versions this
            # module supports, hence the separate branch.)
            for f in _izip_fields(element):
                yield f
        else:
            yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
    """
    Returns an iterator of concatenated items from a sequence of arrays.

    Shorter inputs are padded with `fill_value` so the iteration stops only
    once the *longest* input is exhausted.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False},
        Whether to collapse nested fields of each zipped item.
    """
    # OK, that's a complete ripoff from Python2.6 itertools.izip_longest
    def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
        "Yields the fill_value or raises IndexError"
        # Default-argument trick: `counter` is bound to the .pop method of a
        # list with len(seqarrays)-1 entries. The first len-1 exhausted
        # iterators therefore yield fill_value; the last one pops from an
        # empty list and raises IndexError, which terminates the zip below.
        yield counter()
    #
    fillers = itertools.repeat(fill_value)
    # Each input is chained with one sentinel and then infinite fillers.
    iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
    # Should we flatten the items, or just use a nested approach
    if flatten:
        zipfunc = _izip_fields_flat
    else:
        zipfunc = _izip_fields
    #
    try:
        for tup in zip(*iters):
            yield tuple(zipfunc(tup))
    except IndexError:
        # Raised by `sentinel` once every input iterator is exhausted.
        pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
                 usemask=False, asrecarray=False):
    """
    Merge arrays field by field.

    Parameters
    ----------
    seqarrays : sequence of ndarrays
        Sequence of arrays
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    flatten : {False, True}, optional
        Whether to collapse nested fields.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
    masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
                 mask = [(False, False) (False, False) (True, False)],
           fill_value = (999999, 1e+20),
                dtype = [('f0', '<i4'), ('f1', '<f8')])

    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
    ...              usemask=False)
    array([(1, 10.0), (2, 20.0), (-1, 30.0)],
          dtype=[('f0', '<i4'), ('f1', '<f8')])
    >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
    ...               np.array([10., 20., 30.])),
    ...              usemask=False, asrecarray=True)
    rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
              dtype=[('a', '<i4'), ('f1', '<f8')])

    Notes
    -----
    * Without a mask, the missing value will be filled with something,
    * depending on what its corresponding type:
            -1      for integers
            -1.0    for floating point numbers
            '-'     for characters
            '-1'    for strings
            True    for boolean values
    * XXX: I just obtained these values empirically
    """
    # Only one item in the input sequence ?
    if (len(seqarrays) == 1):
        seqarrays = np.asanyarray(seqarrays[0])
    # Do we have a single ndarray as input ?
    if isinstance(seqarrays, (ndarray, np.void)):
        seqdtype = seqarrays.dtype
        # If not flattening, or if flattening would not change the dtype,
        # a simple view of the input is enough.
        if (not flatten) or \
           (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
            seqarrays = seqarrays.ravel()
            # Make sure we have named fields
            if not seqdtype.names:
                seqdtype = [('', seqdtype)]
            # Find what type of array we must return
            if usemask:
                if asrecarray:
                    seqtype = MaskedRecords
                else:
                    seqtype = MaskedArray
            elif asrecarray:
                seqtype = recarray
            else:
                seqtype = ndarray
            return seqarrays.view(dtype=seqdtype, type=seqtype)
        else:
            # Flattening is required: fall through to the general path with
            # the single array wrapped in a tuple.
            seqarrays = (seqarrays,)
    else:
        # Make sure we have arrays in the input sequence
        seqarrays = [np.asanyarray(_m) for _m in seqarrays]
    # Find the sizes of the inputs and their maximum
    sizes = tuple(a.size for a in seqarrays)
    maxlength = max(sizes)
    # Get the dtype of the output (flattening if needed)
    newdtype = zip_descr(seqarrays, flatten=flatten)
    # Initialize the sequences for data and mask
    seqdata = []
    seqmask = []
    # If we expect some kind of MaskedArray, make a special loop.
    if usemask:
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            # Get the data and mask
            data = a.ravel().__array__()
            mask = ma.getmaskarray(a).ravel()
            # Get the filling value (if needed)
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        # Single-field fill value: use the scalar itself.
                        fval = fval.item()[0]
                        fmsk = True
                    else:
                        # Multi-field fill value: pad with 1-element arrays.
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
                        fmsk = np.ones((1,), dtype=mask.dtype)
            else:
                fval = None
                fmsk = True
            # Store an iterator padding the input to the expected length
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
            seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
        # Create an iterator for the data
        data = tuple(izip_records(seqdata, flatten=flatten))
        output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
                          mask=list(izip_records(seqmask, flatten=flatten)))
        if asrecarray:
            output = output.view(MaskedRecords)
    else:
        # Same as before, without the mask we don't need...
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            data = a.ravel().__array__()
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
            else:
                fval = None
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
        output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
                             dtype=newdtype, count=maxlength)
        if asrecarray:
            output = output.view(recarray)
    # And we're done...
    return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
    """
    Return a new array with fields in `drop_names` dropped.

    Nested fields are supported.

    Parameters
    ----------
    base : array
        Input array
    drop_names : string or sequence
        String or sequence of strings corresponding to the names of the
        fields to drop.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : string or sequence, optional
        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
        a plain ndarray or masked array with flexible dtype. The default
        is False.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
    ...   dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
    >>> rfn.drop_fields(a, 'a')
    array([((2.0, 3),), ((5.0, 6),)],
          dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
    >>> rfn.drop_fields(a, 'ba')
    array([(1, (3,)), (4, (6,))],
          dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
    >>> rfn.drop_fields(a, ['ba', 'bb'])
    array([(1,), (4,)],
          dtype=[('a', '<i4')])
    """
    if _is_string_like(drop_names):
        drop_names = [drop_names, ]
    else:
        drop_names = set(drop_names)

    def _keep_descr(ndtype, drop_names):
        # Rebuild the dtype description, omitting dropped fields and
        # recursing into nested structures.
        kept = []
        for name in ndtype.names:
            if name in drop_names:
                continue
            current = ndtype[name]
            if current.names:
                sub = _keep_descr(current, drop_names)
                # A nested field survives only if at least one child does.
                if sub:
                    kept.append((name, sub))
            else:
                kept.append((name, current))
        return kept

    newdtype = _keep_descr(base.dtype, drop_names)
    if not newdtype:
        # Every field was dropped: there is nothing left to return.
        return None
    output = np.empty(base.shape, dtype=newdtype)
    output = recursive_fill_fields(base, output)
    return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
    """
    Returns a new numpy.recarray with fields in `drop_names` dropped.
    """
    # Plain (unmasked) record array output is forced here.
    return drop_fields(base, drop_names, asrecarray=True, usemask=False)
def rename_fields(base, namemapper):
    """
    Rename the fields from a flexible-datatype ndarray or recarray.

    Nested fields are supported.

    Parameters
    ----------
    base : ndarray
        Input array whose fields must be modified.
    namemapper : dictionary
        Dictionary mapping old field names to their new version.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
    ...   dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
    >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
    array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
          dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])

    """
    def _rename(ndtype, namemapper):
        # Rebuild the description, substituting names at every level.
        renamed = []
        for oldname in ndtype.names:
            newname = namemapper.get(oldname, oldname)
            field = ndtype[oldname]
            if field.names:
                renamed.append((newname, _rename(field, namemapper)))
            else:
                renamed.append((newname, field))
        return renamed

    # Only the dtype changes: a view avoids copying the data.
    return base.view(_rename(base.dtype, namemapper))
def append_fields(base, names, data, dtypes=None,
                  fill_value=-1, usemask=True, asrecarray=False):
    """
    Add new fields to an existing array.

    The names of the fields are given with the `names` arguments,
    the corresponding values with the `data` arguments.
    If a single field is appended, `names`, `data` and `dtypes` do not have
    to be lists but just values.

    Parameters
    ----------
    base : array
        Input array to extend.
    names : string, sequence
        String or sequence of strings corresponding to the names
        of the new fields.
    data : array or sequence of arrays
        Array or sequence of arrays storing the fields to add to the base.
    dtypes : sequence of datatypes, optional
        Datatype or sequence of datatypes.
        If None, the datatypes are estimated from the `data`.
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.
    """
    # Check the names
    if isinstance(names, (tuple, list)):
        if len(names) != len(data):
            msg = "The number of arrays does not match the number of names"
            raise ValueError(msg)
    elif isinstance(names, basestring):
        # Single field: normalize to one-element lists.
        names = [names, ]
        data = [data, ]
    #
    if dtypes is None:
        # Infer each new field's dtype from its data, then give each array a
        # one-field structured view carrying the requested name.
        data = [np.array(a, copy=False, subok=True) for a in data]
        data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
    else:
        if not isinstance(dtypes, (tuple, list)):
            dtypes = [dtypes, ]
        if len(data) != len(dtypes):
            # A single dtype is broadcast to all new fields.
            if len(dtypes) == 1:
                dtypes = dtypes * len(data)
            else:
                msg = "The dtypes argument must be None, a dtype, or a list."
                raise ValueError(msg)
        data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
                for (a, n, d) in zip(data, names, dtypes)]
    #
    # Normalize the base and merge the new fields into a single array.
    base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
    if len(data) > 1:
        data = merge_arrays(data, flatten=True, usemask=usemask,
                            fill_value=fill_value)
    else:
        data = data.pop()
    #
    # Allocate an output long enough for the longer of base/data, then fill
    # it field by field from both sources (shorter one stays masked).
    output = ma.masked_all(max(len(base), len(data)),
                           dtype=base.dtype.descr + data.dtype.descr)
    output = recursive_fill_fields(base, output)
    output = recursive_fill_fields(data, output)
    #
    return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
    """
    Add new fields to an existing array.

    The names of the fields are given with the `names` arguments,
    the corresponding values with the `data` arguments.
    If a single field is appended, `names`, `data` and `dtypes` do not have
    to be lists but just values.

    Parameters
    ----------
    base : array
        Input array to extend.
    names : string, sequence
        String or sequence of strings corresponding to the names
        of the new fields.
    data : array or sequence of arrays
        Array or sequence of arrays storing the fields to add to the base.
    dtypes : sequence of datatypes, optional
        Datatype or sequence of datatypes.
        If None, the datatypes are estimated from the `data`.

    See Also
    --------
    append_fields

    Returns
    -------
    appended_array : np.recarray
    """
    # Same as append_fields, but forcing an unmasked recarray output.
    return append_fields(base, names, data=data, dtypes=dtypes,
                         usemask=False, asrecarray=True)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
                 autoconvert=False):
    """
    Superposes arrays fields by fields

    Parameters
    ----------
    arrays : array or sequence
        Sequence of input arrays.
    defaults : dictionary, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords is
        `asrecarray==True`) or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`)
        or just a flexible-type ndarray.
    autoconvert : {False, True}, optional
        Whether automatically cast the type of the field to the maximum.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> x = np.array([1, 2,])
    >>> rfn.stack_arrays(x) is x
    True
    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
    ...   dtype=[('A', '|S3'), ('B', float), ('C', float)])
    >>> test = rfn.stack_arrays((z,zz))
    >>> test
    masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
     ('c', 30.0, 300.0)],
                 mask = [(False, False, True) (False, False, True) (False, False, False)
     (False, False, False) (False, False, False)],
           fill_value = ('N/A', 1e+20, 1e+20),
                dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])

    """
    # A single array (or a singleton sequence) is returned unchanged.
    if isinstance(arrays, ndarray):
        return arrays
    elif len(arrays) == 1:
        return arrays[0]
    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
    nrecords = [len(a) for a in seqarrays]
    ndtype = [a.dtype for a in seqarrays]
    fldnames = [d.names for d in ndtype]
    #
    # Build the union of all field descriptions, starting from the first
    # array's dtype. (`.descr` returns a fresh list, safe to mutate.)
    dtype_l = ndtype[0]
    newdescr = dtype_l.descr
    names = [_[0] for _ in newdescr]
    for dtype_n in ndtype[1:]:
        for descr in dtype_n.descr:
            name = descr[0] or ''
            if name not in names:
                newdescr.append(descr)
                names.append(name)
            else:
                nameidx = names.index(name)
                current_descr = newdescr[nameidx]
                if autoconvert:
                    # Keep the "larger" of the two dtypes for this field.
                    if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
                        current_descr = list(current_descr)
                        current_descr[-1] = descr[1]
                        newdescr[nameidx] = tuple(current_descr)
                elif descr[1] != current_descr[-1]:
                    # Without autoconvert, type mismatches are an error.
                    raise TypeError("Incompatible type '%s' <> '%s'" %
                                    (dict(newdescr)[name], descr[1]))
    # Only one field: use concatenate
    if len(newdescr) == 1:
        output = ma.concatenate(seqarrays)
    else:
        # Allocate a fully-masked output and copy each input into its own
        # row span; fields absent from an input stay masked there.
        output = ma.masked_all((np.sum(nrecords),), newdescr)
        offset = np.cumsum(np.r_[0, nrecords])
        seen = []
        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
            names = a.dtype.names
            if names is None:
                # Unstructured input: goes into the next anonymous field.
                output['f%i' % len(seen)][i:j] = a
            else:
                for name in n:
                    output[name][i:j] = a[name]
                    if name not in seen:
                        seen.append(name)
    #
    return _fix_output(_fix_defaults(output, defaults),
                       usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
    """
    Find the duplicates in a structured array along a given key

    Parameters
    ----------
    a : array-like
        Input array
    key : {string, None}, optional
        Name of the fields along which to check the duplicates.
        If None, the search is performed by records
    ignoremask : {True, False}, optional
        Whether masked data should be discarded or considered as duplicates.
    return_index : {False, True}, optional
        Whether to return the indices of the duplicated values.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = [('a', int)]
    >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
    ...         mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
    >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
    ... # XXX: judging by the output, the ignoremask flag has no effect
    """
    a = np.asanyarray(a).ravel()
    # Get a dictionary of fields
    fields = get_fieldstructure(a.dtype)
    # Get the sorting data (by selecting the corresponding field)
    base = a
    if key:
        # Walk down through the parent fields to reach the keyed field.
        for f in fields[key]:
            base = base[f]
        base = base[key]
    # Get the sorting indices and the sorted data
    sortidx = base.argsort()
    sortedbase = base[sortidx]
    sorteddata = sortedbase.filled()
    # Compare the sorting data: flag[i] is True when element i+1 equals
    # element i in sorted order.
    flag = (sorteddata[:-1] == sorteddata[1:])
    # If masked data must be ignored, set the flag to false where needed
    if ignoremask:
        sortedmask = sortedbase.recordmask
        flag[sortedmask[1:]] = False
    flag = np.concatenate(([False], flag))
    # We need to take the point on the left as well (else we're missing it)
    flag[:-1] = flag[:-1] + flag[1:]
    duplicates = a[sortidx][flag]
    if return_index:
        return (duplicates, sortidx[flag])
    else:
        return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
            defaults=None, usemask=True, asrecarray=False):
    """
    Join arrays `r1` and `r2` on key `key`.

    The key should be either a string or a sequence of string corresponding
    to the fields used to join the array.  An exception is raised if the
    `key` field cannot be found in the two input arrays.  Neither `r1` nor
    `r2` should have any duplicates along `key`: the presence of duplicates
    will make the output quite unreliable. Note that duplicates are not
    looked for by the algorithm.

    Parameters
    ----------
    key : {string, sequence}
        A string or a sequence of strings corresponding to the fields used
        for comparison.
    r1, r2 : arrays
        Structured arrays.
    jointype : {'inner', 'outer', 'leftouter'}, optional
        If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of not in r2.
        If 'leftouter', returns the common elements and the elements of r1
        not in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present
        in r2 but absent of the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present
        in r1 but absent of the key.
    defaults : {dictionary}, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords is
        `asrecarray==True`) or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`)
        or just a flexible-type ndarray.

    Notes
    -----
    * The output is sorted along the key.
    * A temporary array is formed by dropping the fields not in the key for
      the two arrays and concatenating the result. This array is then
      sorted, and the common entries selected. The output is constructed by
      filling the fields with the selected entries. Matching is not
      preserved if there are some duplicates...

    """
    # Check jointype
    if jointype not in ('inner', 'outer', 'leftouter'):
        raise ValueError(
                "The 'jointype' argument should be in 'inner', "
                "'outer' or 'leftouter' (got '%s' instead)" % jointype
                )
    # If we have a single key, put it in a tuple
    if isinstance(key, basestring):
        key = (key,)

    # Check the keys
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s' % name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s' % name)

    # Make sure we work with ravelled arrays
    r1 = r1.ravel()
    r2 = r2.ravel()
    nb1 = len(r1)
    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)

    # Check the names for collision
    if (set.intersection(set(r1names), set(r2names)).difference(key) and
            not (r1postfix or r2postfix)):
        msg = "r1 and r2 contain common names, r1postfix and r2postfix "
        msg += "can't be empty"
        raise ValueError(msg)

    # Make temporary arrays of just the keys
    r1k = drop_fields(r1, [n for n in r1names if n not in key])
    r2k = drop_fields(r2, [n for n in r2names if n not in key])

    # Concatenate the two arrays for comparison
    aux = ma.concatenate((r1k, r2k))
    idx_sort = aux.argsort(order=key)
    aux = aux[idx_sort]
    #
    # Get the common keys: an entry is "in" when it equals its neighbor
    # in sorted order (keys are assumed duplicate-free within each input).
    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
    idx_in = idx_sort[flag_in]
    idx_1 = idx_in[(idx_in < nb1)]
    idx_2 = idx_in[(idx_in >= nb1)] - nb1
    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
    if jointype == 'inner':
        (r1spc, r2spc) = (0, 0)
    elif jointype == 'outer':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
    elif jointype == 'leftouter':
        idx_out = idx_sort[~flag_in]
        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
    # Select the entries from each input
    (s1, s2) = (r1[idx_1], r2[idx_2])
    #
    # Build the new description of the output array .......
    # Start with the key fields
    ndtype = [list(_) for _ in r1k.dtype.descr]
    # Add the other fields
    ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
    # Find the new list of names (it may be different from r1names)
    names = list(_[0] for _ in ndtype)
    for desc in r2.dtype.descr:
        desc = list(desc)
        name = desc[0]
        # Have we seen the current name already ?
        if name in names:
            # BUG FIX: was `ndtype.index(desc)`, which raised ValueError
            # when the shared field had different dtypes in r1 and r2.
            # Look the entry up by field name instead.
            nameidx = [i for (i, d) in enumerate(ndtype) if d[0] == name][0]
            current = ndtype[nameidx]
            if name in key:
                # The current field is part of the key: promote the dtypes.
                # BUG FIX: was `max(desc[1], current[-1])`, which compared
                # the dtype *strings* lexicographically (e.g.
                # max('<f8', '<i4') == '<i4', silently losing precision).
                current[-1] = np.promote_types(desc[1], current[-1])
            # The current field is not part of the key: add the suffixes
            else:
                current[0] += r1postfix
                desc[0] += r2postfix
                ndtype.insert(nameidx + 1, desc)
        #... we haven't: just add the description to the current list
        else:
            # BUG FIX: was `names.extend(desc[0])`, which added the field
            # name character by character instead of as a whole name.
            names.append(desc[0])
            ndtype.append(desc)
    # Revert the elements to tuples
    ndtype = [tuple(_) for _ in ndtype]
    # Find the largest nb of common fields :
    # r1cmn and r2cmn should be equal, but...
    cmn = max(r1cmn, r2cmn)
    # Construct an empty array
    output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
    names = output.dtype.names
    # Fill the common block from r1, then the r1-only tail (outer joins).
    for f in r1names:
        selected = s1[f]
        if f not in names or (f in r2names and not r2postfix and f not in key):
            f += r1postfix
        current = output[f]
        current[:r1cmn] = selected[:r1cmn]
        if jointype in ('outer', 'leftouter'):
            current[cmn:cmn + r1spc] = selected[r1cmn:]
    # Fill the common block from r2, then the r2-only tail (outer join).
    for f in r2names:
        selected = s2[f]
        if f not in names or (f in r1names and not r1postfix and f not in key):
            f += r2postfix
        current = output[f]
        current[:r2cmn] = selected[:r2cmn]
        if (jointype == 'outer') and r2spc:
            current[-r2spc:] = selected[r2cmn:]
    # Sort and finalize the output
    output.sort(order=key)
    kwargs = dict(usemask=usemask, asrecarray=asrecarray)
    return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
             defaults=None):
    """
    Join the arrays `r1` and `r2` on the given `key` field(s).

    Thin wrapper around `join_by` that forces ``usemask=False`` and
    ``asrecarray=True`` so that the result is always a plain `np.recarray`
    rather than a masked array.

    See Also
    --------
    join_by : equivalent function
    """
    # Delegate everything to join_by, only pinning the output representation.
    return join_by(key, r1, r2, jointype=jointype,
                   r1postfix=r1postfix, r2postfix=r2postfix,
                   defaults=defaults, usemask=False, asrecarray=True)
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/svm/classes.py | 37 | 39951 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
                _LearntSelectorMixin, SparseCoefMixin):
    """Linear Support Vector Classification.

    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
        Specifies the loss function. 'hinge' is the standard SVM loss
        (used e.g. by the SVC class) while 'squared_hinge' is the
        square of the hinge loss.

    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to `coef_`
        vectors that are sparse.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 \
            else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller ``tol`` parameter.

    The underlying implementation (liblinear) uses a sparse internal
    representation for the data that will incur a memory copy.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    **References:**
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__

    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.

    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
                 C=1.0, multi_class='ovr', fit_intercept=True,
                 intercept_scaling=1, class_weight=None, verbose=0,
                 random_state=None, max_iter=1000):
        self.dual = dual
        self.tol = tol
        self.C = C
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.penalty = penalty
        self.loss = loss

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        # Map the deprecated 'l1'/'l2' loss aliases onto the canonical names
        # and warn; kept for backward compatibility only.
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # Reject non-positive C: C == 0 removes the error term entirely and is
        # not a valid liblinear parameter. (The previous ``self.C < 0`` check
        # silently accepted C == 0, contradicting the error message.)
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        self.classes_ = np.unique(y)

        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            self.class_weight, self.penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, self.multi_class,
            self.loss)

        # crammer_singer produces one weight vector per class even for binary
        # problems; collapse them into the standard single-vector form.
        if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
            self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
            if self.fit_intercept:
                intercept = self.intercept_[1] - self.intercept_[0]
                self.intercept_ = np.array([intercept])

        return self
class LinearSVR(LinearModel, RegressorMixin):
    """Linear Support Vector Regression.

    Similar to SVR with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term. The penalty is a squared
        l2 penalty. The bigger this parameter, the less regularization is used.

    loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
        (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.

    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 \
            else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    See also
    --------
    LinearSVC
        Implementation of Support Vector Machine classifier using the
        same library as this class (liblinear).

    SVR
        Implementation of Support Vector Machine regression using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.

    sklearn.linear_model.SGDRegressor
        SGDRegressor can optimize the same cost function as LinearSVR
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
                 loss='epsilon_insensitive', fit_intercept=True,
                 intercept_scaling=1., dual=True, verbose=0,
                 random_state=None, max_iter=1000):
        self.tol = tol
        self.C = C
        self.epsilon = epsilon
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.dual = dual
        self.loss = loss

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        # Map the deprecated 'l1'/'l2' loss aliases onto the canonical names
        # and warn; kept for backward compatibility only.
        loss_l = self.loss.lower()

        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # FIXME change loss_l --> self.loss after 0.18
        if loss_l in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'epsilon_insensitive',
                         'l2': 'squared_epsilon_insensitive'
                         }.get(loss_l)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # Reject non-positive C: C == 0 removes the error term entirely and is
        # not a valid liblinear parameter. (The previous ``self.C < 0`` check
        # silently accepted C == 0, contradicting the error message.)
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        penalty = 'l2'  # SVR only accepts l2 penalty
        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            None, penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, loss=self.loss,
            epsilon=self.epsilon)
        # liblinear returns a 2-D coefficient array; regression exposes a flat
        # coefficient vector.
        self.coef_ = self.coef_.ravel()

        return self
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each
    other, see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in the
        SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
        decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
        max_iter=-1, probability=False, random_state=None, shrinking=True,
        tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape=None,
                 random_state=None):
        # Delegate everything to BaseSVC; C-SVC uses C, so nu is a placeholder.
        super(SVC, self).__init__(
            impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.

    Similar to SVC but uses a parameter to control the number of support
    vectors.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, class_weight=None, coef0=0.0,
          decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
          max_iter=-1, nu=0.5, probability=False, random_state=None,
          shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.

    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """

    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None, verbose=False,
                 max_iter=-1, decision_function_shape=None, random_state=None):
        # Delegate everything to BaseSVC; nu-SVC uses nu, so C is a placeholder.
        super(NuSVC, self).__init__(
            impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """Epsilon-Support Vector Regression.

    The free parameters in the model are C and epsilon.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    epsilon : float, optional (default=0.1)
        Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
        kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.

    LinearSVR
        Scalable Linear Support Vector Machine for regression
        implemented using liblinear.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
                 cache_size=200, verbose=False, max_iter=-1):
        # Delegate to BaseLibSVM; epsilon-SVR uses C and epsilon, so nu is a
        # placeholder, and probability/class_weight do not apply to regression.
        super(SVR, self).__init__(
            'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.

    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of epsilon-SVR.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
          kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
          verbose=False)

    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.

    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """

    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
                 cache_size=200, verbose=False, max_iter=-1):
        # Delegate to BaseLibSVM; nu replaces epsilon in nu-SVR, so epsilon is
        # a placeholder, and probability/class_weight do not apply to
        # regression.
        super(NuSVR, self).__init__(
            'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=False, cache_size=cache_size, class_weight=None,
            verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # Delegate to the shared libsvm base class with impl='one_class'.
        # The hard-coded 0./False/None positional values presumably fill the
        # base-class slots (C, epsilon, probability, class_weight) that are
        # not meaningful for one-class estimation -- confirm against the
        # BaseLibSVM signature before reordering.
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, None, verbose, max_iter,
            random_state)
    def fit(self, X, y=None, sample_weight=None, **params):
        """
        Detects the soft boundary of the set of samples X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.

        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Returns self.

        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        # ``y`` is accepted only for pipeline API compatibility and ignored:
        # an empty label list is passed to the shared libsvm fit routine,
        # since one-class estimation is unsupervised.
        super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
                                     **params)
        return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
joernhees/scikit-learn | sklearn/model_selection/tests/test_search.py | 6 | 51806 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import in1d
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the parameter search algorithms"""

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        assert_true(len(X) == len(Y))
        self.classes_ = np.unique(Y)
        return self

    def predict(self, T):
        # Degenerate "prediction": just report the number of samples.
        return T.shape[0]

    def transform(self, X):
        return X + self.foo_param

    def inverse_transform(self, X):
        return X - self.foo_param

    # All probabilistic/decision interfaces share the same dummy output.
    predict_proba = predict
    predict_log_proba = predict
    decision_function = predict

    def score(self, X=None, Y=None):
        # A "good" score is obtained iff foo_param is large enough; this is
        # what lets the search tests know which candidate should win.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """An LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Raising AttributeError from the property makes
        # ``hasattr(est, 'score')`` False, simulating an estimator
        # without a score method.
        raise AttributeError


# Tiny linearly separable toy problem shared by many tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])


def assert_grid_iter_equals_getitem(grid):
    # Iteration order and __getitem__ indexing must agree for a grid.
    assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)

    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    # len is the size of the Cartesian product: 2 * 3.
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))
    assert_grid_iter_equals_getitem(grid2)

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])

    # A list of grids: sizes add up, and the empty dict contributes one
    # candidate (the estimator defaults).
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    # (verbose output is redirected so it does not pollute the test log)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    grid_search.fit(X, y)
    sys.stdout = old_stdout
    # MockClassifier scores 1.0 only for foo_param > 1, so 2 and 3 tie and
    # the smaller value must win.
    assert_equal(grid_search.best_estimator_.foo_param, 2)

    assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
                       [1, 2, 3])

    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)

    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
    # Shared check: fit parameters passed to ``fit`` must be validated and
    # forwarded to the wrapped estimator by any searcher class.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
    searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)

    # The CheckingClassifer generates an assertion error if
    # a parameter is missing or has length != len(X).
    assert_raise_message(AssertionError,
                         "Expected fit parameter(s) ['eggs'] not seen.",
                         searcher.fit, X, y, spam=np.ones(10))
    assert_raise_message(AssertionError,
                         "Fit parameter spam has length 1; expected 4.",
                         searcher.fit, X, y, spam=np.ones(1),
                         eggs=np.zeros(10))
    # Correctly shaped fit params must pass through without error.
    searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))


def test_grid_search_with_fit_params():
    check_hyperparameter_searcher_with_fit_params(GridSearchCV)


def test_random_search_with_fit_params():
    check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
    # NOTE: Remove this test in v0.21
    # Use of `fit_params` in the class constructor is deprecated,
    # but will still work until v0.21.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_fit_params=['spam'])
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
                               fit_params={'spam': np.ones(10)})
    # Constructor-supplied fit_params must trigger a DeprecationWarning.
    assert_warns(DeprecationWarning, grid_search.fit, X, y)


def test_grid_search_fit_params_two_places():
    # NOTE: Remove this test in v0.21
    # If users try to input fit parameters in both
    # the constructor (deprecated use) and the `fit`
    # method, we'll ignore the values passed to the constructor.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_fit_params=['spam'])

    # The "spam" array is too short and will raise an
    # error in the CheckingClassifier if used.
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
                               fit_params={'spam': np.ones(1)})

    expected_warning = ('Ignoring fit_params passed as a constructor '
                        'argument in favor of keyword arguments to '
                        'the "fit" method.')
    assert_warns_message(RuntimeWarning, expected_warning,
                         grid_search.fit, X, y, spam=np.ones(10))

    # Verify that `fit` prefers its own kwargs by giving valid
    # kwargs in the constructor and invalid in the method call
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
                               fit_params={'spam': np.ones(10)})
    assert_raise_message(AssertionError, "Fit parameter spam has length 1",
                         grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)

    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)

    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)

    # check that best params are equal
    assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
    # check that we can call score and that it gives the correct result
    assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))

    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}

    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc').fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)

    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = search_no_scoring.score(X, y)
    score_accuracy = search_accuracy.score(X, y)
    score_no_score_auc = search_no_score_method_auc.score(X, y)
    score_auc = search_auc.score(X, y)

    # ensure the test is sane
    assert_true(score_auc < 1.0)
    assert_true(score_accuracy < 1.0)
    assert_not_equal(score_auc, score_accuracy)

    # score() must follow the scorer the search was configured with, not
    # the estimator's default score method.
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
    # Check if ValueError (when groups is None) propagates to GridSearchCV
    # And also check if groups is correctly passed to the cv object
    rng = np.random.RandomState(0)

    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 3, 15)

    clf = LinearSVC(random_state=0)
    grid = {'C': [1]}

    group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
                 GroupShuffleSplit()]
    for cv in group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Group-based splitters must refuse to split without groups ...
        assert_raise_message(ValueError,
                             "The 'groups' parameter should not be None.",
                             gs.fit, X, y)
        # ... and succeed once groups are supplied through fit.
        gs.fit(X, y, groups=groups)

    non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
def test_classes__property():
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [.1, 1, 10]

    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_,
                       grid_search.classes_)

    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
    grid_search.fit(X, y)
    assert_false(hasattr(grid_search, 'classes_'))

    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    assert_false(hasattr(grid_search, 'classes_'))

    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0),
                               {'C': Cs}, refit=False)
    grid_search.fit(X, y)
    assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
    # Test search over a "grid" with only one point.
    # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]})
    grid_search.fit(X, y)
    assert_true(hasattr(grid_search, "cv_results_"))

    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
    random_search.fit(X, y)
    # Bug fix: this previously re-checked ``grid_search`` (a copy-paste
    # slip), leaving the RandomizedSearchCV result entirely unverified.
    assert_true(hasattr(random_search, "cv_results_"))
def test_no_refit():
    # Test that GSCV can be used for model selection alone without refitting
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
    grid_search.fit(X, y)
    # Without refit there is no best_estimator_, but the winning candidate
    # is still identified.
    assert_true(not hasattr(grid_search, "best_estimator_") and
                hasattr(grid_search, "best_index_") and
                hasattr(grid_search, "best_params_"))

    # Make sure the predict/transform etc fns raise meaningful error msg
    for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
                    'transform', 'inverse_transform'):
        assert_raise_message(NotFittedError,
                             ('refit=False. %s is available only after '
                              'refitting on the best parameters' % fn_name),
                             getattr(grid_search, fn_name), X)
def test_grid_search_error():
    # Test that grid search will capture errors on data with different length
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    # 180 samples but 200 targets: input validation must raise.
    assert_raises(ValueError, cv.fit, X_[:180], y_)


def test_grid_search_one_grid_point():
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}

    clf = SVC()
    cv = GridSearchCV(clf, param_dict)
    cv.fit(X_, y_)

    # A single-point grid must yield the same model as fitting directly
    # with those parameters.
    clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
    clf.fit(X_, y_)

    assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = None
    # ``range`` is lazy in Python 3 (as ``xrange`` is in Python 2); either
    # must be accepted as a parameter sequence by the grid.
    if PY3_OR_LATER:
        grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
    else:
        grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
    grid_search.fit(X, y)
    assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
    # Scalar value: not a sequence at all.
    param_dict = {"C": 1.0}
    clf = SVC()
    assert_raise_message(
        ValueError,
        "Parameter values for parameter (C) need to be a sequence"
        "(but not a string) or np.ndarray.",
        GridSearchCV, clf, param_dict)

    # Empty sequence: no candidates to search over.
    param_dict = {"C": []}
    clf = SVC()
    assert_raise_message(
        ValueError,
        "Parameter values for parameter (C) need to be a non-empty sequence.",
        GridSearchCV, clf, param_dict)

    # A string is a sequence but is rejected explicitly.
    param_dict = {"C": "1,2,3"}
    clf = SVC()
    assert_raise_message(
        ValueError,
        "Parameter values for parameter (C) need to be a sequence"
        "(but not a string) or np.ndarray.",
        GridSearchCV, clf, param_dict)

    # Multi-dimensional arrays are not valid candidate lists either.
    param_dict = {"C": np.ones(6).reshape(3, 2)}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    # Dense and sparse fits should mostly agree and select the same C.
    assert_true(np.mean(y_pred == y_pred2) >= .9)
    assert_equal(C, C2)
def test_grid_search_sparse_scoring():
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    # Dense vs sparse input must not change the outcome under a custom
    # scoring string.
    assert_array_equal(y_pred, y_pred2)
    assert_equal(C, C2)
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))

    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)
    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C

    # A negated loss must rank candidates exactly like the score it negates.
    assert_equal(C, C3)
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]

    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(K_train, y_train)

    assert_true(cv.best_score_ >= 0)

    # compute the test kernel matrix
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]

    y_pred = cv.predict(K_test)

    # Smoke check only: any non-negative accuracy passes.
    assert_true(np.mean(y_pred == y_test) >= 0)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)


def test_grid_search_precomputed_kernel_error_nonsquare():
    # Test that grid search returns an error with a non-square precomputed
    # training kernel matrix
    K_train = np.zeros((10, 20))
    y_train = np.ones((10, ))
    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Fitting twice trips this assertion: the fitted marker survives
        # unless the searcher clones the estimator before each fit.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
    # Regression test for bug in refitting
    # Simulates re-fitting a broken estimator; this used to break with
    # sparse SVMs.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                       scoring="precision", refit=True)
    # Passing iff the searcher clones before every fit (see BrokenClassifier).
    clf.fit(X, y)
def test_gridsearch_nd():
    # Pass X as list in GridSearchCV
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    # The checks verify that n-dimensional inputs reach the estimator
    # with their trailing shape intact.
    check_X = lambda x: x.shape[1:] == (5, 3, 2)
    check_y = lambda x: x.shape[1:] == (7, 11)
    clf = CheckingClassifier(check_X=check_X, check_y=check_y)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert_true(hasattr(grid_search, "cv_results_"))


def test_X_as_list():
    # Pass X as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X.tolist(), y).score(X, y)
    assert_true(hasattr(grid_search, "cv_results_"))


def test_y_as_list():
    # Pass y as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X, y.tolist()).score(X, y)
    assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        # pandas is optional; fall back to the mock containers only.
        pass

    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)

        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
    # test grid-search with unsupervised estimator
    X, y = make_blobs(random_state=0)
    km = KMeans(random_state=0)

    # Supervised clustering metrics can exploit the ground-truth labels y.
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                               scoring='adjusted_rand_score')
    grid_search.fit(X, y)
    # ARI can find the right number :)
    assert_equal(grid_search.best_params_["n_clusters"], 3)

    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                               scoring='fowlkes_mallows_score')
    grid_search.fit(X, y)
    # So can FMS ;)
    assert_equal(grid_search.best_params_["n_clusters"], 3)

    # Now without a score, and without y
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    grid_search.fit(X)
    # 4 wins here -- presumably because KMeans' own score keeps improving
    # with more clusters; confirm against KMeans.score semantics.
    assert_equal(grid_search.best_params_["n_clusters"], 4)


def test_gridsearch_no_predict():
    # test grid-search with an estimator without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        return 42 if estimator.bandwidth == .1 else 0
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert_equal(search.best_params_['bandwidth'], .1)
    assert_equal(search.best_score_, 42)
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)

    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert_equal([x for x in sampler], [x for x in sampler])

    # scipy >= 0.16 distributions accept a seed, so determinism can also
    # be checked for continuous distributions.
    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
    # Check if the search `cv_results`'s array are of correct types
    assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
                    for param in param_keys))
    assert_true(all(cv_results[key].dtype == object for key in param_keys))
    assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
                     for key in score_keys))
    assert_true(all(cv_results[key].dtype == np.float64
                    for key in score_keys if not key.startswith('rank')))
    # Ranks are the only integer-valued score column.
    assert_true(cv_results['rank_test_score'].dtype == np.int32)


def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
    # Test the search.cv_results_ contains all the required results
    assert_array_equal(sorted(cv_results.keys()),
                       sorted(param_keys + score_keys + ('params',)))
    assert_true(all(cv_results[key].shape == (n_cand,)
                    for key in param_keys + score_keys))


def check_cv_results_grid_scores_consistency(search):
    # TODO Remove in 0.20
    # Cross-check the deprecated ``grid_scores_`` against ``cv_results_``.
    cv_results = search.cv_results_
    res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
                                 for i in range(search.n_splits_)])).T
    res_means = cv_results["mean_test_score"]
    res_params = cv_results["params"]
    n_cand = len(res_params)
    # Accessing grid_scores_ itself must emit the deprecation warning.
    grid_scores = assert_warns(DeprecationWarning, getattr,
                               search, 'grid_scores_')
    assert_equal(len(grid_scores), n_cand)
    # Check consistency of the structure of grid_scores
    for i in range(n_cand):
        assert_equal(grid_scores[i].parameters, res_params[i])
        assert_array_equal(grid_scores[i].cv_validation_scores,
                           res_scores[i, :])
        assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4,
                               random_state=42)

    n_splits = 3
    n_grid_points = 6
    params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
              dict(kernel=['poly', ], degree=[1, 2])]
    grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
                               param_grid=params)
    grid_search.fit(X, y)
    grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
                                   param_grid=params)
    grid_search_iid.fit(X, y)

    param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_candidates = n_grid_points

    for search, iid in zip((grid_search, grid_search_iid), (False, True)):
        assert_equal(iid, search.iid)
        cv_results = search.cv_results_
        # Check if score and timing are reasonable
        assert_true(all(cv_results['rank_test_score'] >= 1))
        # Bug fix: the generator expressions were previously passed
        # directly to assert_true (a generator is always truthy), so
        # these bounds were never actually checked.  Also replaced the
        # string identity tests (``k is not 'rank_test_score'``) with
        # equality comparisons.
        assert_true(all(all(cv_results[k] >= 0) for k in score_keys
                        if k != 'rank_test_score'))
        assert_true(all(all(cv_results[k] <= 1) for k in score_keys
                        if 'time' not in k and k != 'rank_test_score'))
        # Check cv_results structure
        check_cv_results_array_types(cv_results, param_keys, score_keys)
        check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
        # Check masking
        cv_results = grid_search.cv_results_
        n_candidates = len(grid_search.cv_results_['params'])
        # Bug fix: the first masking check filtered on kernel == 'linear',
        # which never occurs in ``params`` and so asserted nothing; 'poly'
        # is the kernel whose sub-grid omits C and gamma.
        assert_true(all((cv_results['param_C'].mask[i] and
                         cv_results['param_gamma'].mask[i] and
                         not cv_results['param_degree'].mask[i])
                        for i in range(n_candidates)
                        if cv_results['param_kernel'][i] == 'poly'))
        assert_true(all((not cv_results['param_C'].mask[i] and
                         not cv_results['param_gamma'].mask[i] and
                         cv_results['param_degree'].mask[i])
                        for i in range(n_candidates)
                        if cv_results['param_kernel'][i] == 'rbf'))
        check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
    # Make a dataset with a lot of noise to get various kind of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)

    # scipy.stats dists now supports `seed` but we still support scipy 0.12
    # which doesn't support the seed. Hence the assertions in the test for
    # random_search alone should not depend on randomization.
    n_splits = 3
    n_search_iter = 30
    params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
    random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
                                       cv=n_splits, iid=False,
                                       param_distributions=params)
    random_search.fit(X, y)
    random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
                                           cv=n_splits, iid=True,
                                           param_distributions=params)
    random_search_iid.fit(X, y)

    param_keys = ('param_C', 'param_gamma')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    # Randomized search evaluates exactly n_iter candidates.
    n_cand = n_search_iter

    for search, iid in zip((random_search, random_search_iid), (False, True)):
        assert_equal(iid, search.iid)
        cv_results = search.cv_results_
        # Check results structure
        check_cv_results_array_types(cv_results, param_keys, score_keys)
        check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
        # For random_search, all the param array vals should be unmasked
        assert_false(any(cv_results['param_C'].mask) or
                     any(cv_results['param_gamma'].mask))
        check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
    # Test the IID parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=np.bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
    random_search = RandomizedSearchCV(SVC(), n_iter=2,
                                       param_distributions={'C': [1, 10]},
                                       cv=cv)
    for search in (grid_search, random_search):
        search.fit(X, y)
        assert_true(search.iid)

        test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
                                                          % s_i][0]
                                       for s_i in range(search.n_splits_)))
        # NOTE(review): train_cv_scores is computed twice below with
        # identical expressions; the first assignment is redundant.
        train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
                                                           'score' % s_i][0]
                                        for s_i in range(search.n_splits_)))
        test_mean = search.cv_results_['mean_test_score'][0]
        test_std = search.cv_results_['std_test_score'][0]

        train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
                                                           'score' % s_i][0]
                                        for s_i in range(search.n_splits_)))
        train_mean = search.cv_results_['mean_train_score'][0]
        train_std = search.cv_results_['std_train_score'][0]

        # Test the first candidate
        assert_equal(search.cv_results_['param_C'][0], 1)
        assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
        assert_array_almost_equal(train_cv_scores, [1, 1])

        # for first split, 1/4 of dataset is in test, for second 3/4.
        # take weighted average and weighted std
        expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
        expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
                                    3. / 4 * (expected_test_mean - 1. / 3.) **
                                    2)
        assert_almost_equal(test_mean, expected_test_mean)
        assert_almost_equal(test_std, expected_test_std)

        # For the train scores, we do not take a weighted mean irrespective of
        # i.i.d. or not
        assert_almost_equal(train_mean, 1)
        assert_almost_equal(train_std, 0)

    # once with iid=False
    grid_search = GridSearchCV(SVC(),
                               param_grid={'C': [1, 10]},
                               cv=cv, iid=False)
    random_search = RandomizedSearchCV(SVC(), n_iter=2,
                                       param_distributions={'C': [1, 10]},
                                       cv=cv, iid=False)

    for search in (grid_search, random_search):
        search.fit(X, y)
        assert_false(search.iid)

        test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
                                                          % s][0]
                                       for s in range(search.n_splits_)))
        test_mean = search.cv_results_['mean_test_score'][0]
        test_std = search.cv_results_['std_test_score'][0]

        train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
                                                           'score' % s][0]
                                        for s in range(search.n_splits_)))
        train_mean = search.cv_results_['mean_train_score'][0]
        train_std = search.cv_results_['std_train_score'][0]

        assert_equal(search.cv_results_['param_C'][0], 1)
        # scores are the same as above
        assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])

        # Unweighted mean/std is used
        assert_almost_equal(test_mean, np.mean(test_cv_scores))
        assert_almost_equal(test_std, np.std(test_cv_scores))

        # For the train scores, we do not take a weighted mean irrespective of
        # i.i.d. or not
        assert_almost_equal(train_mean, 1)
        assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
    """Check that candidates with tied mean test scores share the 'min' rank."""
    X, y = make_blobs(n_samples=50, random_state=42)

    # The two C values are close enough to give similar models
    # which would result in a tie of their mean cv-scores
    param_grid = {'C': [1, 1.001, 0.001]}

    grid_search = GridSearchCV(SVC(), param_grid=param_grid)
    random_search = RandomizedSearchCV(SVC(), n_iter=3,
                                       param_distributions=param_grid)

    for search in (grid_search, random_search):
        search.fit(X, y)
        cv_results = search.cv_results_
        # Check tie breaking strategy -
        # Check that there is a tie in the mean scores between
        # candidates 1 and 2 alone
        assert_almost_equal(cv_results['mean_test_score'][0],
                            cv_results['mean_test_score'][1])
        assert_almost_equal(cv_results['mean_train_score'][0],
                            cv_results['mean_train_score'][1])
        # The third candidate (C=0.001) may or may not also tie with the
        # first two, so a failed comparison is deliberately tolerated here.
        try:
            assert_almost_equal(cv_results['mean_test_score'][1],
                                cv_results['mean_test_score'][2])
        except AssertionError:
            pass
        try:
            assert_almost_equal(cv_results['mean_train_score'][1],
                                cv_results['mean_train_score'][2])
        except AssertionError:
            pass
        # 'min' rank should be assigned to the tied candidates
        assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
    """``None`` must be accepted and recorded as a grid parameter value."""
    X = [[1], [2], [3], [4], [5]]
    y = [0, 0, 0, 0, 1]
    candidate_params = {"random_state": [0, None]}
    splitter = KFold(random_state=0)
    for estimator in (DecisionTreeRegressor(), DecisionTreeClassifier()):
        search = GridSearchCV(estimator, candidate_params, cv=splitter)
        search.fit(X, y)
        # Both values, including None, appear in the recorded parameters.
        assert_array_equal(search.cv_results_['param_random_state'],
                           [0, None])
@ignore_warnings()
def test_search_cv_timing():
    """Sanity-check the fit/score timing columns of ``cv_results_``."""
    svc = LinearSVC(random_state=0)

    X = [[1, ], [2, ], [3, ], [4, ]]
    y = [0, 1, 1, 0]

    gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
    rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)

    for search in (gs, rs):
        search.fit(X, y)
        for key in ['mean_fit_time', 'std_fit_time']:
            # NOTE The precision of time.time in windows is not high
            # enough for the fit/score times to be non-zero for trivial X and y
            assert_true(np.all(search.cv_results_[key] >= 0))
            assert_true(np.all(search.cv_results_[key] < 1))

        for key in ['mean_score_time', 'std_score_time']:
            # NOTE(review): index 0 is presumably the C=0 candidate whose fit
            # fails (error_score=0), so its score time stays exactly 0.0 —
            # confirm against the search's candidate ordering.
            assert_true(search.cv_results_[key][1] >= 0)
            assert_true(search.cv_results_[key][0] == 0.0)
            assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
    # test that correct scores are used
    n_splits = 3
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
        cv_results = grid_search.fit(X, y).cv_results_

        # Test scorer names
        result_keys = list(cv_results.keys())
        expected_keys = (("mean_test_score", "rank_test_score") +
                         tuple("split%d_test_score" % cv_i
                               for cv_i in range(n_splits)))
        assert_true(all(in1d(expected_keys, result_keys)))

        cv = StratifiedKFold(n_splits=n_splits)
        # Re-read the split count from the fitted search; this rebinds
        # n_splits for the remainder of the loop body.
        n_splits = grid_search.n_splits_
        for candidate_i, C in enumerate(Cs):
            clf.set_params(C=C)
            cv_scores = np.array(
                list(grid_search.cv_results_['split%d_test_score'
                                             % s][candidate_i]
                     for s in range(n_splits)))
            # Recompute each fold's score manually and compare it with the
            # value the search recorded for the same candidate/split.
            for i, (train, test) in enumerate(cv.split(X, y)):
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
    """A fitted search estimator must survive a pickle round-trip."""
    clf = MockClassifier()
    searches = [
        GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True),
        RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
                           refit=True, n_iter=3),
    ]
    for search in searches:
        search.fit(X, y)
        restored = pickle.loads(pickle.dumps(search))
        # The unpickled copy predicts exactly like the original.
        assert_array_almost_equal(search.predict(X), restored.predict(X))
def _check_search_scores_multioutput(search, est, cv, X, y):
    # Refit `est` with each recorded candidate's parameters and compare the
    # per-split scores with what the search stored in cv_results_.
    res_params = search.cv_results_['params']
    for cand_i in range(len(res_params)):
        est.set_params(**res_params[cand_i])
        for i, (train, test) in enumerate(cv.split(X, y)):
            est.fit(X[train], y[train])
            correct_score = est.score(X[test], y[test])
            assert_almost_equal(
                correct_score,
                search.cv_results_['split%d_test_score' % i][cand_i])


def test_grid_search_with_multioutput_data():
    """Searches must record correct per-split scores for multi-output data.

    Both GridSearchCV and RandomizedSearchCV are checked against scores
    recomputed by refitting each candidate by hand (shared logic lives in
    ``_check_search_scores_multioutput``).
    """
    X, y = make_multilabel_classification(return_indicator=True,
                                          random_state=0)

    est_parameters = {"max_depth": [1, 2, 3, 4]}
    cv = KFold(random_state=0)

    estimators = [DecisionTreeRegressor(random_state=0),
                  DecisionTreeClassifier(random_state=0)]

    # Test with grid search cv
    for est in estimators:
        grid_search = GridSearchCV(est, est_parameters, cv=cv)
        grid_search.fit(X, y)
        _check_search_scores_multioutput(grid_search, est, cv, X, y)

    # Test with a randomized search
    for est in estimators:
        random_search = RandomizedSearchCV(est, est_parameters,
                                           cv=cv, n_iter=3)
        random_search.fit(X, y)
        _check_search_scores_multioutput(random_search, est, cv, X, y)
def test_predict_proba_disabled():
    """The search must not expose predict_proba when the estimator lacks it."""
    features = np.arange(20).reshape(5, -1)
    labels = [0, 0, 1, 1, 1]
    base = SVC(probability=False)
    search = GridSearchCV(base, {}, cv=2)
    search.fit(features, labels)
    assert_false(hasattr(search, "predict_proba"))
def test_grid_search_allows_nans():
    """GridSearchCV should accept data with NaNs when a pipeline imputes them."""
    features = np.arange(20, dtype=np.float64).reshape(5, -1)
    features[2, :] = np.nan
    labels = [0, 0, 1, 1, 1]
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    param_grid = {'classifier__foo_param': [1, 2, 3]}
    # Fitting must succeed; the imputer removes the NaNs before the classifier.
    GridSearchCV(pipeline, param_grid, cv=2).fit(features, labels)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()

    Used to exercise the ``error_score`` handling of the search estimators:
    any candidate fitted with ``parameter == FAILING_PARAMETER`` fails
    deterministically, every other value fits silently.
    """

    # Parameter value that triggers the deliberate fit failure.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        """Raise for the failing parameter value, otherwise do nothing.

        Returns ``self``, following the scikit-learn estimator convention
        (the previous version implicitly returned ``None``).
        """
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")
        return self

    def predict(self, X):
        """Predict class 0 for every sample."""
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with on_error != 'raise'
    # Ensures that a warning is raised and score reset where appropriate.

    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    clf = FailingClassifier()

    # refit=False because we only want to check that errors caused by fits
    # to individual folds will be caught and warnings raised instead. If
    # refit was done, then an exception would be raised on refit and not
    # caught by grid_search (expected behavior), and this would cause an
    # error in this test.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=0.0)

    assert_warns(FitFailedWarning, gs.fit, X, y)
    n_candidates = len(gs.cv_results_['params'])

    # Ensure that grid scores were set to zero as required for those fits
    # that are expected to fail.
    def get_cand_scores(i):
        # Per-split test scores of candidate i, closed over the current `gs`.
        return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
                             for s in range(gs.n_splits_)))

    assert all((np.all(get_cand_scores(cand_i) == 0.0)
                for cand_i in range(n_candidates)
                if gs.cv_results_['param_parameter'][cand_i] ==
                FailingClassifier.FAILING_PARAMETER))

    # Same search with error_score=NaN: failing candidates get NaN scores.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=float('nan'))
    assert_warns(FitFailedWarning, gs.fit, X, y)
    n_candidates = len(gs.cv_results_['params'])
    assert all(np.all(np.isnan(get_cand_scores(cand_i)))
               for cand_i in range(n_candidates)
               if gs.cv_results_['param_parameter'][cand_i] ==
               FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    """With error_score='raise' the estimator's exception must propagate."""
    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    # refit=False because we want to test the behaviour of the grid search part
    search = GridSearchCV(FailingClassifier(), [{'parameter': [0, 1, 2]}],
                          scoring='accuracy', refit=False,
                          error_score='raise')

    # FailingClassifier issues a ValueError so this is what we look for.
    assert_raises(ValueError, search.fit, X, y)
def test_parameters_sampler_replacement():
    """ParameterSampler must sample without replacement over finite grids."""
    # raise error if n_iter too large
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params, n_iter=7)
    assert_raises(ValueError, list, sampler)

    # degenerates to GridSearchCV if n_iter the same as grid_size
    sampler = ParameterSampler(params, n_iter=6)
    samples = list(sampler)
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # test sampling without replacement in a large grid
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    sampler = ParameterSampler(params, n_iter=99, random_state=42)
    samples = list(sampler)
    assert_equal(len(samples), 99)
    # Uniqueness check: encode each sample as a string key and count them.
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    assert_equal(len(set(hashable_samples)), 99)

    # doesn't go into infinite loops
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params_distribution, n_iter=7)
    samples = list(sampler)
    assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
    # Make sure the predict_proba works when loss is specified
    # as one of the parameters in the param_grid.
    param_grid = {
        'loss': ['log'],
    }
    X = np.arange(24).reshape(6, -1)
    y = [0, 0, 0, 1, 1, 1]
    clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
                       param_grid=param_grid)

    # When the estimator is not fitted, `predict_proba` is not available as the
    # loss is 'hinge'.
    assert_false(hasattr(clf, "predict_proba"))
    clf.fit(X, y)
    # After fitting, the best estimator uses loss='log', so probability
    # estimates become available through the search wrapper.
    clf.predict_proba(X)
    clf.predict_log_proba(X)

    # Make sure `predict_proba` is not available when setting loss=['hinge']
    # in param_grid
    param_grid = {
        'loss': ['hinge'],
    }
    clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
                       param_grid=param_grid)
    assert_false(hasattr(clf, "predict_proba"))
    clf.fit(X, y)
    assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
    """Fitting with return_train_score=False must complete without error."""
    features = np.arange(6).reshape(6, -1)
    labels = [0, 0, 0, 1, 1, 1]
    search = GridSearchCV(LinearSVC(random_state=0),
                          param_grid={'C': [0.1, 0.2]},
                          return_train_score=False)
    search.fit(features, labels)
def test_grid_search_cv_splits_consistency():
    # Check if a one time iterable is accepted as a cv parameter.
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=n_samples, random_state=0)

    gs = GridSearchCV(LinearSVC(random_state=0),
                      param_grid={'C': [0.1, 0.2, 0.3]},
                      cv=OneTimeSplitter(n_splits=n_splits,
                                         n_samples=n_samples))
    gs.fit(X, y)

    gs2 = GridSearchCV(LinearSVC(random_state=0),
                       param_grid={'C': [0.1, 0.2, 0.3]},
                       cv=KFold(n_splits=n_splits))
    gs2.fit(X, y)

    def _pop_time_keys(cv_results):
        # Timing columns differ between runs; drop them before comparing.
        for key in ('mean_fit_time', 'std_fit_time',
                    'mean_score_time', 'std_score_time'):
            cv_results.pop(key)
        return cv_results

    # OneTimeSplitter is a non-re-entrant cv where split can be called only
    # once if ``cv.split`` is called once per param setting in GridSearchCV.fit
    # the 2nd and 3rd parameter will not be evaluated as no train/test indices
    # will be generated for the 2nd and subsequent cv.split calls.
    # This is a check to make sure cv.split is not called once per param
    # setting.
    np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
                            _pop_time_keys(gs2.cv_results_))

    # Check consistency of folds across the parameters
    gs = GridSearchCV(LinearSVC(random_state=0),
                      param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
                      cv=KFold(n_splits=n_splits, shuffle=True))
    gs.fit(X, y)

    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are same, the test and train scores must also be
    # same as long as the same train/test indices are generated for all
    # the cv splits, for both param setting
    for score_type in ('train', 'test'):
        per_param_scores = {}
        for param_i in range(4):
            per_param_scores[param_i] = list(
                gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
                for s in range(5))

        assert_array_almost_equal(per_param_scores[0],
                                  per_param_scores[1])
        assert_array_almost_equal(per_param_scores[2],
                                  per_param_scores[3])
def test_transform_inverse_transform_round_trip():
    """transform followed by inverse_transform must recover the input."""
    search = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]},
                          verbose=3)
    search.fit(X, y)
    round_tripped = search.inverse_transform(search.transform(X))
    assert_array_equal(X, round_tripped)
| bsd-3-clause |
google/dl_bounds | dl_bounds/src/results.py | 1 | 30995 | # coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements visualization of exprimental results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from dl_bounds.src.pysqlite_ext import SqliteDB
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
FLAGS = tf.flags.FLAGS

# Command-line interface: selects which artifact to produce ("do") and where
# the experimental results and outputs live.
tf.flags.DEFINE_enum("do", "plot_hessian_vs_margin",
                     ["plot_hessian_vs_margin", "table_norms", "table_phased"],
                     "Type of result to plot.")
tf.flags.DEFINE_string("db", None, "Database location.")
tf.flags.DEFINE_string("dataset", None, "Dataset.")
tf.flags.DEFINE_string("network", None, "Network.")
tf.flags.DEFINE_string("groupby_param", None, "Parameter name.")
tf.flags.DEFINE_string("pdf", None, "PDF filename to plot to.")
tf.flags.DEFINE_boolean("show", False, "Show plot.")

# All of the flags below must be supplied; flag parsing fails fast otherwise.
tf.flags.mark_flag_as_required("do")
tf.flags.mark_flag_as_required("db")
tf.flags.mark_flag_as_required("dataset")
tf.flags.mark_flag_as_required("network")
tf.flags.mark_flag_as_required("groupby_param")
class Results(object):
  """Retrieves results from Sqlite database."""

  def __init__(self, db_filename, mean_metrics=False):
    """Constructor.

    Args:
      db_filename: path to sqlite3 database.
      mean_metrics: return means of metrics over groupby parameter,
        passed to getters.
    """
    self.db_filename = db_filename
    # Metrics recorded for every experiment (losses and 0/1 errors).
    self.common_metrics = [
        "train_error",
        "val_error",
        "train_zero_one_error",
        "val_zero_one_error",
    ]
    # Norm-based complexity measures tracked over training.
    self.norm_metrics = [
        "weight_l2_norms", "path_l2_norms", "spectral_products",
        "spectral_complexities", "cond_numbers",
        "ratios_of_mean_deep_embeddings", "ratios_of_mean_shallow_embeddings",
        "soft_margins", "weight_variance", "weight_entropy",
        "train_grad_norm", "val_grad_norm",
    ]
    self.mean_metrics = mean_metrics

  @staticmethod
  def records_to_dict(rs, records, name):
    # Stack the per-record arrays for `name` into one matrix (row per record).
    rs[name] = np.vstack([rec[name] for rec in records])

  def extract_metrics(self, rs, records, param_names):
    # Copy each named metric from `records` into the result dict `rs`.
    for name in param_names:
      Results.records_to_dict(rs, records, name)

  def extract_common_metrics(self, rs, records):
    # Common metrics plus the shared epoch axis ("pass_numbers").
    self.extract_metrics(rs, records, self.common_metrics)
    if records:
      rs["pass_numbers"] = records[0]["pass_numbers"]

  def extract_margins(self, rs, records, margin_eps_index=-1):
    # Squared soft margin at one epsilon (last by default); the stored
    # array is 2-D or 3-D depending on the experiment version.
    if records:
      if records[0]["soft_margins"].ndim == 3:
        rs["sq_margin"] = np.vstack(
            [rec["soft_margins"][:, :, margin_eps_index]**2
             for rec in records])
      elif records[0]["soft_margins"].ndim == 2:
        rs["sq_margin"] = np.vstack(
            [rec["soft_margins"][:, margin_eps_index]**2 for rec in records])

  def get_metrics(self,
                  dataset,
                  groupby_param_name,
                  metrics,
                  bad_min,
                  extra_constraints=""):
    """Retrieves list of records, where each record is a dict.

    Args:
      dataset: name of a dataset.
      groupby_param_name: parameter to group results by.
      metrics: list of metric names to retrieve.
      bad_min: [bool] retrieve results for "bad" minumum experiment.
      extra_constraints: extra "WHERE" constraints.

    Returns:
      list of dicts, where each dict stands for a record.
    """
    # NOTE(review): this mutates the caller's `metrics` list in place.
    metrics.extend(self.common_metrics)
    if self.mean_metrics:
      tf.logging.info("Retrieving means of metrics.")
      select_metrics = ["mean(%s, 0) as \"%s\"" % (s, s) for s in metrics]
      select_metrics.extend(
          ["stddev(%s, 0) as \"stddev_%s\"" % (s, s) for s in metrics])
    else:
      select_metrics = metrics
    args = dict(
        dataset=dataset,
        groupby_param_name=groupby_param_name,
        metrics=", ".join(select_metrics),
        metric_constraint=" and ".join(["%s is not null" % m for m in metrics]),
        bad_min=int(bad_min),
        extra_constraints=extra_constraints)
    db = SqliteDB(os.path.join(self.db_filename))
    # MLP width/depth are encoded in the network name, so group by network
    # and decode below.
    if groupby_param_name in ["width", "depth"]:
      args["groupby_param_name"] = "network"
    sql_query = """
        SELECT pass_numbers,
               %(groupby_param_name)s,
               %(metrics)s
        FROM rs
        WHERE dataset = "%(dataset)s"
          AND bad_min = %(bad_min)s
          AND %(metric_constraint)s
          %(extra_constraints)s
        GROUP by %(groupby_param_name)s
        ORDER by %(groupby_param_name)s
    """ % args
    tf.logging.info(sql_query)
    rs = db.execute(sql_query)
    # Handling width and depth parameter (for MLP) in a special way
    # i.e. parsing the name and convering into integer column
    if groupby_param_name == "width":
      for (i, rec) in enumerate(rs):
        # Network names look like "<prefix>_<w1>-<w2>"; both widths must match.
        layer_widths = rec["network"].split("_")[1].split("-")
        assert len(layer_widths) == 2
        assert layer_widths[0] == layer_widths[1]
        rs[i]["width"] = int(layer_widths[0])
      rs.sort(key=lambda x: x["width"])
    elif groupby_param_name == "depth":
      for (i, rec) in enumerate(rs):
        layer_widths = rec["network"].split("_")[1].split("-")
        rs[i]["depth"] = int(len(layer_widths))
      rs.sort(key=lambda x: x["depth"])
    return rs

  def get_sharpness(self, dataset, groupby_param_name, bad_min,
                    extra_constraints):
    """Get sharpness records.

    Retrieves records with common metrics (e.g. training/testing error)
    and sharpness metric.

    Args:
      dataset: name of a dataset.
      groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minumum experiment.
      extra_constraints: extra "WHERE" constraints.

    Returns:
      list of dicts, where each dict stands for a record.
    """
    rs = dict()
    records = self.get_metrics(dataset, groupby_param_name,
                               ["sharpness", "alpha"], bad_min,
                               extra_constraints)
    # Normalize the alpha vector; alphas are assumed identical across
    # records, so the last record's values name the keys below.
    for rec in records:
      alphas = rec["alpha"]
      if alphas.ndim == 2:
        alphas = alphas[0, :]
    # One "sharpness_<alpha>" matrix per alpha; the stored array is 2-D or
    # 3-D depending on the experiment version.
    if records[0]["sharpness"].ndim == 3:
      for i in range(len(alphas)):
        rs["sharpness_%s" % alphas[i]] = np.vstack(
            [rec["sharpness"][:, :, i].squeeze() for rec in records])
    elif records[0]["sharpness"].ndim == 2:
      for i in range(len(alphas)):
        rs["sharpness_%s" % alphas[i]] = np.vstack(
            [rec["sharpness"][:, i].squeeze() for rec in records])
    return rs

  def get_all_metrics(self,
                      dataset,
                      groupby_param_name,
                      bad_min=False,
                      extra_constraints=""):
    """Get records for all the metrics.

    Args:
      dataset: name of a dataset.
      groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minumum experiment.
      extra_constraints: extra "WHERE" constraints.

    Returns:
      list of dicts, where each dict stands for a record.
    """
    # Pulling norm-metrics
    records = self.get_metrics(dataset, groupby_param_name, self.norm_metrics,
                               bad_min, extra_constraints)
    rs = dict()
    self.extract_common_metrics(rs, records)
    self.extract_metrics(rs, records, self.norm_metrics)
    self.extract_metrics(rs, records, [groupby_param_name])
    self.extract_margins(rs, records)

    # Pulling sharpness
    sharpness_rs = self.get_sharpness(dataset, groupby_param_name, bad_min,
                                      extra_constraints)
    rs.update(sharpness_rs)

    # Pulling Hessian spectral norm
    hessian_records = self.get_metrics(dataset, groupby_param_name,
                                       ["hessian_top_sv_means"], bad_min,
                                       extra_constraints)
    self.extract_metrics(rs, hessian_records, ["hessian_top_sv_means"])
    return rs

  def get_hessian(self,
                  dataset,
                  groupby_param_name,
                  bad_min=False,
                  extra_constraints=""):
    """Get Hessian spectral norm records.

    Retrieves records with common metrics (e.g. training/testing error)
    and the Hessian spectral norm metric.

    Args:
      dataset: name of a dataset.
      groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minumum experiment.
      extra_constraints: extra "WHERE" constraints.

    Returns:
      list of dicts, where each dict stands for a record.
    """
    records = self.get_metrics(
        dataset, groupby_param_name,
        ["hessian_top_sv_means", "soft_margins", "train_grad_norm"], bad_min,
        extra_constraints)
    rs = dict()
    self.extract_common_metrics(rs, records)
    self.extract_metrics(rs, records, ["hessian_top_sv_means",
                                       "stddev_hessian_top_sv_means",
                                       "train_grad_norm"])
    self.extract_metrics(rs, records, [groupby_param_name])
    self.extract_margins(rs, records)
    return rs
class MetricTable(object):
  """Implements conversion of metric results to a LaTeX table."""

  def __init__(self,
               db_filename,
               dataset,
               groupby_param_name,
               network,
               extra_constraints=""):
    """Constructor.

    Args:
      db_filename: path to sqlite3 database.
      dataset: name of a dataset.
      groupby_param_name: parameter to group results by.
      network: network name.
      extra_constraints: extra "WHERE" constraints.
    """
    rs = Results(db_filename)
    extra_constraints_sql = ("and network like '%s' %s " % (network,
                                                            extra_constraints))
    self.records_good_min = rs.get_all_metrics(
        dataset,
        groupby_param_name,
        bad_min=False,
        extra_constraints=extra_constraints_sql)
    self.records_bad_min = rs.get_all_metrics(
        dataset,
        groupby_param_name,
        bad_min=True,
        extra_constraints=extra_constraints_sql)
    # Number of settings of the group-by parameter (table rows).
    self.n_params = self.records_good_min["sq_margin"].shape[0]

  @staticmethod
  def format_number(num):
    """Formats a float.

    Args:
      num: float value.

    Returns:
      if num is in [1e-2, 1e+3), returns a float with 10^-2 precision.
      Otherwise returns value in scientific format.
    """
    if 1e-2 <= num < 1e+3:
      return "$%.2f$" % num
    elif num == 0.0:
      return "$0$"
    else:
      # Split "m.me±x" into mantissa and exponent for LaTeX rendering.
      base, exponent = ("%.1e" % num).split("e")
      return "$%s \\cdot 10^{%s}$" % (base, int(exponent))

  @staticmethod
  def extract_column(records_good_min_metric,
                     records_bad_min_metric,
                     records_good_min_metric_stddev=None,
                     records_bad_min_metric_stddev=None,
                     bold_col=True):
    """Formats a column of a LaTeX table.

    Given a numpy array of records corresponding to good-minumum experiment,
    and a bad one, formats these into two adjacent columns.
    Highlights minimal and maximal value in the "bad" column.

    Args:
      records_good_min_metric: numpy array of values from a "good" experiment.
      records_bad_min_metric: numpy array of values from a "bad" experiment.
      records_good_min_metric_stddev: stddev of "good" experiment.
      records_bad_min_metric_stddev: stddev of "bad" experiment.
      bold_col: bolden max. and min. values in the "bad" column
        otherwise, bolden max. value in the good/bad pair

    Returns:
      a string in LaTeX format.
    """
    min_i_bad_min_metric = np.argmin(records_bad_min_metric)
    max_i_bad_min_metric = np.argmax(records_bad_min_metric)
    column = []
    for (i, z) in enumerate(
        zip(records_good_min_metric, records_bad_min_metric)):
      # NOTE(review): `zs` is the result of map(); the item assignments
      # (`zs[0] = ...`) and the `"%s & ... %s" % zs` formats below need a
      # list/tuple (`list(map(...))`, `% tuple(zs)`) — looks broken at
      # least under Python 3; confirm the intended interpreter.
      zs = map(MetricTable.format_number, z)
      if records_bad_min_metric_stddev:
        z_stddev = (
            MetricTable.format_number(records_good_min_metric_stddev[i]),
            MetricTable.format_number(records_bad_min_metric_stddev[i])
        )
        zs[0] = "%s $\\pm$ %s" % (zs[0], z_stddev[0])
        zs[1] = "%s $\\pm$ %s" % (zs[1], z_stddev[1])
      if bold_col:
        # Highlight the extreme values of the "bad" column.
        if min_i_bad_min_metric == i or max_i_bad_min_metric == i:
          column.append("%s & \\boldmath{%s}" % zs)
        else:
          column.append("%s & %s" % tuple(map(MetricTable.format_number, z)))
      else:
        # Highlight whichever of the good/bad pair is larger.
        if z[0] > z[1]:
          column.append("\\boldmath{%s} & %s" % tuple(zs))
        elif z[0] < z[1]:
          column.append("%s & \\boldmath{%s}" % tuple(zs))
        else:
          column.append("%s & %s" % tuple(zs))
    return column

  @staticmethod
  def format_good_bad_table(corner_label, col_labels, row_labels,
                            rows, print_full_doc):
    """Formats a table with every column split for "good" and "bad" metric.

    Args:
      corner_label: a label of the top left corner.
      col_labels: column labels.
      row_labels: row labels.
      rows: row content, must be 2 * # of columns.
      print_full_doc: format full LaTeX doc., ready to compilation.

    Returns:
      LaTeX formatted string.
    """
    n_cols = len(col_labels)
    table_lines = []
    if print_full_doc:
      # Standalone landscape document wrapper.
      table_lines.append(r"\documentclass{article}")
      table_lines.append(
          r"\usepackage[a4paper, landscape, margin=2mm]{geometry}")
      table_lines.append(
          r"\usepackage{amsmath,amssymb,amsfonts,amsthm,graphics}")
      table_lines.append(r"\begin{document}")
      table_lines.append(r"\begin{center}")
    table_lines.append(r"\begin{table}")
    table_lines.append(r"\scalebox{0.6}{")
    # One label column + a Good/Bad pair per data column.
    table_lines.append(r"\begin{tabular}{%s|}" % ("|l" *
                                                  (2 * (n_cols) + 1)))
    heads = ([corner_label] + [
        r"\multicolumn{2}{|p{3cm}|}{%s}" % col_label
        for col_label in col_labels
    ])
    table_lines.append(r"\hline")
    table_lines.append(" & ".join(heads) + r" \\")
    table_lines.append(r"\hline")
    table_lines.append(" & ".join([""] + ["Good", "Bad"] *
                                  (n_cols)) + r"\\ ")
    table_lines.append(r"\hline")
    table_lines.append("\n".join([
        " & ".join([row_labels[i]] + list(row)) + r" \\" + "\n\\hline"
        for (i, row) in enumerate(rows)
    ]))
    table_lines.append(r"\end{tabular}")
    table_lines.append(r"}")
    table_lines.append(r"\end{table}")
    if print_full_doc:
      table_lines.append(r"\end{center}")
      table_lines.append(r"\end{document}")
    return "\n".join(table_lines)

  def print(self, metrics, normalize_by_margin=False, print_full_doc=False):
    """Formats a latex table for a given set of metrics.

    Args:
      metrics: list of metric names.
      normalize_by_margin: normalize metrics by the squared soft margin.
      print_full_doc: wrap LaTeX table into the markup ready for compilation.

    Returns:
      a table formatted as a LaTeX string.
    """
    pass_numbers = self.records_good_min["pass_numbers"]
    columns = []
    good_sq_soft_margin = self.records_good_min["sq_margin"].squeeze()
    bad_sq_soft_margin = self.records_bad_min["sq_margin"].squeeze()
    # Subselect index is passed whenever one record is a vector
    # e.g. eigenvalues of all layers
    for (metric_name, metric_label, subselect_index) in metrics:
      records_good_min_metric = self.records_good_min[metric_name].squeeze()
      records_bad_min_metric = self.records_bad_min[metric_name].squeeze()
      # Backwards compatibility
      # older experiments recorded multiple "bad" minima snapshots
      # here we are keeping only the last one
      if records_bad_min_metric.ndim == 2:
        records_bad_min_metric = records_bad_min_metric[-1, :]
      if subselect_index:
        records_good_min_metric = records_good_min_metric[:, subselect_index]
        records_bad_min_metric = records_bad_min_metric[:, subselect_index]
      if normalize_by_margin and (metric_name != "sq_margin"):
        records_good_min_metric /= good_sq_soft_margin
        records_bad_min_metric /= bad_sq_soft_margin
      column = MetricTable.extract_column(records_good_min_metric,
                                          records_bad_min_metric)
      columns.append(column)
    val_error_good = self.records_good_min["val_error"].squeeze()
    val_error_bad = self.records_bad_min["val_error"].squeeze()
    train_error_good = self.records_good_min["train_error"].squeeze()
    train_error_bad = self.records_bad_min["train_error"].squeeze()
    val_zero_one_error_good = (
        self.records_good_min["val_zero_one_error"].squeeze())
    val_zero_one_error_bad = (
        self.records_bad_min["val_zero_one_error"].squeeze())
    train_zero_one_error_good = (
        self.records_good_min["train_zero_one_error"].squeeze())
    train_zero_one_error_bad = (
        self.records_bad_min["train_zero_one_error"].squeeze())
    # Backwards compatibility again
    if val_error_bad.ndim == 2:
      val_error_bad = val_error_bad[-1, :]
      train_error_bad = train_error_bad[-1, :]
      val_zero_one_error_bad = val_zero_one_error_bad[-1, :]
      train_zero_one_error_bad = train_zero_one_error_bad[-1, :]
    error_metrics_all = [[(train_error_bad, train_error_good, "Train error"),
                          (val_error_bad, val_error_good, "Val error")],
                         [(train_zero_one_error_bad, train_zero_one_error_good,
                           "Train error (0/1)"),
                          (val_zero_one_error_bad, val_zero_one_error_good,
                           "Val error (0/1)")]]
    error_labels = []
    # Append the four error columns after the metric columns.
    for (i, error_metrics) in enumerate(error_metrics_all):
      for (metric_bad, metric_good, label) in error_metrics:
        column = MetricTable.extract_column(metric_good, metric_bad)
        columns.append(column)
        error_labels.append(label)
    rows = zip(*columns)
    table_lines = []
    if print_full_doc:
      table_lines.append(r"\documentclass{article}")
      table_lines.append(
          r"\usepackage[a4paper, landscape, margin=2mm]{geometry}")
      table_lines.append(
          r"\usepackage{amsmath,amssymb,amsfonts,amsthm,graphics}")
      table_lines.append(r"\begin{document}")
      table_lines.append(r"\begin{center}")
    table_lines.append(r"\begin{table}")
    table_lines.append(r"\scalebox{0.6}{")
    # One epoch column + a Good/Bad pair per metric and per error column.
    table_lines.append(r"\begin{tabular}{%s|}" % ("|l" *
                                                  (2 * (len(metrics) + 4) + 1)))
    heads = (["Epoch"] + [
        r"\multicolumn{2}{|p{3cm}|}{%s}" % metric_label
        for (_, metric_label, _) in metrics
    ] + [
        r"\multicolumn{2}{|p{3cm}|}{%s}" % error_label
        for error_label in error_labels
    ])
    table_lines.append(r"\hline")
    table_lines.append(" & ".join(heads) + r" \\")
    table_lines.append(r"\hline")
    table_lines.append(" & ".join([""] + ["Good", "Bad"] *
                                  (len(metrics) + 4)) + r"\\ ")
    table_lines.append(r"\hline")
    table_lines.append("\n".join([
        " & ".join([str(pass_numbers[i])] + list(row)) + r" \\" + "\n\\hline"
        for (i, row) in enumerate(rows)
    ]))
    table_lines.append(r"\end{tabular}")
    table_lines.append(r"}")
    table_lines.append(r"\end{table}")
    if print_full_doc:
      table_lines.append(r"\end{center}")
      table_lines.append(r"\end{document}")
    return "\n".join(table_lines)
class MetricVsParamTable(object):
  """Formats a good-vs-bad metric table with one column per parameter value."""

  def __init__(self, db_filename, dataset, network, groupby_param_name):
    """Constructor.

    Args:
      db_filename: path to sqlite3 database.
      dataset: dataset name.
      network: network name.
      groupby_param_name: parameter to group results by.
    """
    rs = Results(db_filename, mean_metrics=True)
    self.records_good_min = rs.get_all_metrics(
        dataset,
        groupby_param_name,
        bad_min=False,
        extra_constraints="and network like '%s' " % network)
    self.records_bad_min = rs.get_all_metrics(
        dataset,
        groupby_param_name,
        bad_min=True,
        extra_constraints="and network like '%s' " % network)
    self.groupby_param_name = groupby_param_name
    # Number of settings of the group-by parameter (table columns).
    self.n_params = self.records_good_min["sq_margin"].shape[0]

  def print(self, print_full_doc):
    """Returns a LaTeX table of final-epoch metrics per parameter setting.

    Args:
      print_full_doc: wrap the table into a compilable LaTeX document.
    """
    # (cv_results key, human-readable row label) pairs, in row order.
    metrics = [
        ("weight_l2_norms", "Weight L2 norm"),
        ("path_l2_norms", "Path L2 norm"),
        ("spectral_products", "Lip. const of the network"),
        ("spectral_complexities", "Spectral complexity"),
        ("hessian_top_sv_means", "Hessian spectral norm"),
        ("sharpness_0.0005", "Sharpness\\newline (alpha=0.0005)"),
        ("train_grad_norm", "Train grad. norm"),
        ("train_error", "Train error"),
        ("val_error", "Val. error"),
        ("train_zero_one_error", "Train (0/1) error"),
        ("val_zero_one_error", "Val. (0/1) error")
    ]
    columns = []
    row_labels = [m[1] for m in metrics]
    col_labels = [x[0] for x in self.records_good_min[self.groupby_param_name]]
    for param_index in range(self.n_params):
      col_metric_values_good = []
      col_metric_values_bad = []
      # Take each metric at the final recorded epoch (index -1).
      for (metric_name, _) in metrics:
        metric_values_good = self.records_good_min[metric_name][param_index, -1]
        metric_values_bad = self.records_bad_min[metric_name][param_index, -1]
        col_metric_values_good.append(metric_values_good)
        col_metric_values_bad.append(metric_values_bad)
      column = MetricTable.extract_column(np.array(col_metric_values_good),
                                          np.array(col_metric_values_bad),
                                          bold_col=False)
      columns.append(column)
    rows = zip(*columns)
    table_text = MetricTable.format_good_bad_table(
        self.groupby_param_name.replace("_", " "),
        col_labels,
        row_labels,
        rows,
        print_full_doc)
    return table_text
class HessianVsMarginPlot(object):
"""Plots experimental results with Hessian spectral norm and margin."""
  def __init__(self, db_filename, dataset, network, groupby_param_name,
               figure_path):
    """Constructor.

    Arranges results into a plot table, where columns are
    progressions of various metrics over epochs, and rows
    are different settings of a "groupby" parameter (e.g. a learning rate).

    Args:
      db_filename: path to sqlite3 database.
      dataset: dataset name.
      network: network name.
      groupby_param_name: parameter to group results by.
      figure_path: path for a PDF file with resulting figure.
    """
    rs = Results(db_filename, mean_metrics=True)
    self.records_good_min = rs.get_hessian(
        dataset,
        groupby_param_name,
        bad_min=False,
        extra_constraints="and network like '%s' " % network)
    self.records_bad_min = rs.get_hessian(
        dataset,
        groupby_param_name,
        bad_min=True,
        extra_constraints="and network like '%s' " % network)
    self.groupby_param_name = groupby_param_name
    # Number of parameter settings = number of plot-table rows.
    self.n_params = self.records_good_min["sq_margin"].shape[0]
    self.figure_path = figure_path
def plot_one_setting(self, param_index, legend=True):
"""Plot results for one "groupby" parameter.
Args:
param_index: index of a "groupby" parameter.
legend: [bool] plot legend.
"""
pass_numbers = self.records_good_min["pass_numbers"]
hessian_sv_good = self.records_good_min["hessian_top_sv_means"][
param_index, :]
sq_margin_good = self.records_good_min["sq_margin"][param_index, :]
val_error_good = self.records_good_min["val_error"][param_index, :]
train_error_good = self.records_good_min["train_error"][param_index, :]
val_zero_one_error_good = self.records_good_min["val_zero_one_error"][
param_index, :]
train_zero_one_error_good = self.records_good_min["train_zero_one_error"][
param_index, :]
train_grad_norm_good = self.records_good_min["train_grad_norm"][
param_index, :]
hessian_sv_bad = self.records_bad_min["hessian_top_sv_means"][
param_index, :]
sq_margin_bad = self.records_bad_min["sq_margin"][param_index, :]
val_error_bad = self.records_bad_min["val_error"][param_index, :]
train_error_bad = self.records_bad_min["train_error"][param_index, :]
val_zero_one_error_bad = self.records_bad_min["val_zero_one_error"][
param_index, :]
train_zero_one_error_bad = self.records_bad_min["train_zero_one_error"][
param_index, :]
train_grad_norm_bad = self.records_bad_min["train_grad_norm"][
param_index, :]
self.n_cell_rows = 5
title = "%s = %s" % (self.groupby_param_name,
self.records_good_min[
self.groupby_param_name][param_index][0])
self.plot_cell(
param_index + 1,
pass_numbers,
hessian_sv_bad,
hessian_sv_good,
"||Hessian (bad)||_2",
"||Hessian (good)||_2",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
param_index + 1,
pass_numbers,
train_grad_norm_bad,
train_grad_norm_good,
"||grad (bad)||",
"||grad (good)||",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
self.n_params + param_index + 1,
pass_numbers,
1.0 / sq_margin_bad,
1.0 / sq_margin_good,
"Inv. of margin^2 (bad)",
"Inv. of margin^2 (good)",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
2 * self.n_params + param_index + 1,
pass_numbers,
train_error_bad,
train_error_good,
"Train error (bad)",
"Train error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
2 * self.n_params + param_index + 1,
pass_numbers,
val_error_bad,
val_error_good,
"Val error (bad)",
"Val error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
3 * self.n_params + param_index + 1,
pass_numbers,
train_zero_one_error_bad,
train_zero_one_error_good,
"Train (0/1) error (bad)",
"Train (0/1) error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
3 * self.n_params + param_index + 1,
pass_numbers,
val_zero_one_error_bad,
val_zero_one_error_good,
"Val 0/1 error (bad)",
"Val 0/1 error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
def plot_cell(self,
i,
x,
y_bad,
y_good,
label_bad,
label_good,
title,
plotter=plt.plot,
add_legend=True):
"""Plot one cell of a plot table.
Args:
i: subplot index of a cell.
x: values on the x axis.
y_bad: values on the y axis, for a "bad" experiment.
y_good: values on the y axis, for a "good" experiment.
label_bad: corresponding label.n
label_good: corresponding label.
title: title of a plot.
plotter: matplotlib plotting function.
add_legend: [bool] plot a legend.
"""
if any(np.isnan(y_bad)) or any(np.isnan(y_good)):
tf.logging.info("i=%d, Cannot plot: contains NaNs." % i)
return
ax = plt.subplot(self.n_cell_rows, self.n_params, i)
ax.set_title(title)
plot_rs = plotter(x, y_bad, linewidth=3, label=label_bad)
plotter(
x,
y_good,
linewidth=3,
label=label_good,
color=plot_rs[0].get_color(),
linestyle="--")
if add_legend:
legend = plt.legend(loc="best", fontsize="small")
legend = legend.get_frame().set_alpha(0.5)
plt.grid(True)
def plot(self):
plt.figure(figsize=(self.n_params * 10, 10))
for i in range(self.n_params):
self.plot_one_setting(i, legend=(i == 0))
tf.logging.info("Saving to %s", self.figure_path)
plt.savefig(self.figure_path, tight_layout=True, bbox_inches="tight")
def pdflatex(tex, pdf_path):
    """Compile a LaTeX string into a PDF via the `pdflatex` binary.

    Args:
      tex: LaTeX document source.
      pdf_path: output PDF path; its basename (without extension) becomes
        the pdflatex jobname and its directory the output directory.
    """
    fd, fname = tempfile.mkstemp()
    # mkstemp returns an already-open OS-level fd; the original discarded
    # it, leaking one fd per call.  Close it and reopen through `with` so
    # the handle is flushed and released before pdflatex reads the file.
    os.close(fd)
    with open(fname, "wt") as tex_file:
        tex_file.write(tex)
    # NOTE(review): paths are interpolated into a shell command line;
    # callers must not pass untrusted pdf_path values.
    shell = ("pdflatex --jobname='%s' --output-directory='%s' %s" %
             (os.path.basename(pdf_path).split(".")[0],
              os.path.dirname(pdf_path), fname))
    pdflatex_out = os.popen(shell).read()
    tf.logging.info(pdflatex_out)
def main(_):
    """Entry point: dispatch on FLAGS.do to produce a plot or a table."""
    tf.logging.set_verbosity(tf.logging.INFO)
    if FLAGS.do == "plot_hessian_vs_margin":
        plot = HessianVsMarginPlot(FLAGS.db, FLAGS.dataset, FLAGS.network,
                                   FLAGS.groupby_param, FLAGS.pdf)
        plot.plot()
        if FLAGS.show:
            plt.show()
    elif FLAGS.do.startswith("table_"):
        # Each metric tuple is (record key, display label, unused slot).
        if FLAGS.do == "table_norms":
            metrics = [("weight_l2_norms", "Weight L2 norm", None),
                       ("path_l2_norms", "Path L2 norm", None),
                       ("spectral_products", "Prod. of layer\\newline spectral norms",
                        None),
                       ("spectral_complexities", "Spectral\\newline complexity",
                        None),
                       ("train_grad_norm", "Train grad. norm", None),
                       ("val_grad_norm", "Val grad. norm", None)]
        elif FLAGS.do == "table_phased":
            metrics = [("weight_variance", "Weight variance", None),
                       ("hessian_top_sv_means", "Hessian\\newline spectral norm",
                        None),
                       ("train_grad_norm", "Train grad. norm", None),
                       ("val_grad_norm", "Val grad. norm", None),
                       ("sharpness_0.0005",
                        "Sharpness\\newline (alpha=0.0005)", None),
                       ("weight_entropy", "Weight entropy\\newline($10^3$ bin hist)",
                        None), ("sq_margin", "Squared\\newline soft margin", None)]
        # NOTE(review): any other "table_*" value leaves `metrics` unbound and
        # the call below raises NameError -- confirm whether that is intended.
        table = MetricTable(FLAGS.db, FLAGS.dataset, "bad_min", FLAGS.network,
                            "and learning_rate=0.05")
        table_text = table.print(metrics, print_full_doc=FLAGS.pdf)
        if FLAGS.pdf:
            # Render the LaTeX table to the requested PDF path.
            pdflatex(table_text, FLAGS.pdf)
        else:
            print(table_text)


if __name__ == "__main__":
    tf.app.run(main)
| apache-2.0 |
amdouglas/OpenPNM | OpenPNM/Network/__GenericNetwork__.py | 1 | 40997 | # -*- coding: utf-8 -*-
"""
===============================================================================
GenericNetwork: Abstract class to construct pore networks
===============================================================================
"""
import scipy as sp
import scipy.sparse as sprs
import scipy.spatial as sptl
import OpenPNM.Utilities.misc as misc
from OpenPNM.Utilities import topology
from OpenPNM.Base import Core, Controller, Tools, logging
logger = logging.getLogger(__name__)
ctrl = Controller()
topo = topology()
class GenericNetwork(Core):
r"""
GenericNetwork - Base class to construct pore networks
Parameters
----------
name : string
Unique name for Network object
"""
def __init__(self, **kwargs):
    r"""Initialize the network and its cached sparse-matrix stores."""
    super().__init__(**kwargs)
    logger.name = self.name
    # Per-format caches for the topology matrices; populated lazily by
    # the find_neighbor_* methods and cleared by _update_network().
    self._adjacency_matrix = {}
    self._incidence_matrix = {}
def __setitem__(self, prop, value):
    # 'throat.conns' gets special validation: it must be (Nt, 2), and each
    # row is stored sorted so that conns[:, 0] <= conns[:, 1].
    if prop == 'throat.conns':
        if sp.shape(value)[1] != 2:
            logger.error('Wrong size for throat conns!')
            # NOTE(review): execution falls through and the badly sized
            # array is still written below -- confirm this is intended.
        else:
            # Swap any rows whose first pore index exceeds the second.
            mask = value[:, 0] > value[:, 1]
            if mask.any():
                logger.debug('The first column in (throat.conns) should be \
smaller than the second one.')
                v1 = sp.copy(value[:, 0][mask])
                v2 = sp.copy(value[:, 1][mask])
                value[:, 0][mask] = v2
                value[:, 1][mask] = v1
    # Refuse to shadow a property already defined on an associated
    # Geometry object ('all' label arrays are exempt).
    for geom in self._geometries:
        if (prop in geom.keys()) and ('all' not in prop.split('.')):
            logger.error(prop + ' is already defined in at least one associated \
Geometry object')
            return
    super().__setitem__(prop, value)
def __getitem__(self, key):
    r"""Return stored data for *key*.

    A key of the form '<element>.<network name>' is an alias for
    '<element>.all'.  Keys absent from the Network itself are assembled
    by interleaving data from the associated Geometry objects.
    """
    parts = key.split('.')
    if parts[-1] == self.name:
        # 'pore.<name>' / 'throat.<name>' alias the corresponding 'all' array.
        return self[parts[0] + '.all']
    if key in self.keys():
        return super().__getitem__(key)
    logger.debug(key + ' not on Network, constructing data from Geometries')
    return self._interleave_data(key, self.geometries())
def _set_net(self, network):
    # A Network is its own '_net'; assignments are deliberately ignored.
    pass

def _get_net(self):
    # A Network is its own '_net'.
    return self

# Lets Network objects expose the same '_net' attribute as other Core
# objects, always resolving to themselves.
_net = property(fset=_set_net, fget=_get_net)
def create_adjacency_matrix(self, data=None, sprsfmt='coo',
                            dropzeros=True, sym=True):
    r"""
    Generates a weighted pore-to-pore adjacency matrix in the desired
    sparse format.

    Parameters
    ----------
    data : array_like, optional
        Throat values to enter into the matrix (the 'weights' in graph
        theory terms).  If omitted, ones are used, giving a standard
        connectivity-only adjacency matrix.  Must be Nt long.
    sprsfmt : string, optional
        The sparse storage format to return. Options are:

        * 'coo' : (default) This is the native format of OpenPNM data
        * 'lil' : Enables row-wise slice of data
        * 'csr' : Favored by most linear algebra routines
    dropzeros : boolean, optional
        Omit zero-weighted throats instead of storing explicit zeros
        (default True).
    sym : Boolean, optional
        Makes the matrix symmetric about the diagonal (default True).

    Returns
    -------
    Returns an adjacency matrix in the specified Scipy sparse format

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> vals = sp.rand(pn.num_throats(),) < 0.5
    >>> temp = pn.create_adjacency_matrix(data=vals, sprsfmt='csr')
    """
    logger.debug('create_adjacency_matrix: Start of method')
    Np = self.num_pores()
    Nt = self.num_throats()
    # Default to a pure connectivity matrix; otherwise validate length.
    if data is None:
        data = sp.ones((self.num_throats(),))
    elif sp.shape(data)[0] != Nt:
        raise Exception('Received dataset of incorrect length')
    # Optionally discard zero-weighted connections.
    keep = data > 0 if dropzeros else sp.ones_like(data, dtype=bool)
    conn = self['throat.conns'][keep]
    row, col = conn[:, 0], conn[:, 1]
    data = data[keep]
    if sym:
        # Mirror every entry across the diagonal.
        row = sp.append(row, conn[:, 1])
        col = sp.append(col, conn[:, 0])
        data = sp.append(data, data)
    # Assemble in 'coo' then convert if another format was requested.
    mat = sprs.coo_matrix((data, (row, col)), (Np, Np))
    if sprsfmt == 'csr':
        mat = mat.tocsr()
    elif sprsfmt == 'lil':
        mat = mat.tolil()
    logger.debug('create_adjacency_matrix: End of method')
    return mat
def create_incidence_matrix(self, data=None, sprsfmt='coo', dropzeros=True):
    r"""
    Creates an (Np x Nt) incidence matrix filled with supplied throat
    values.

    Parameters
    ----------
    data : array_like, optional
        Throat values to enter into the matrix (the 'weights' in graph
        theory terms).  If omitted, ones are used, giving a standard
        connectivity-only incidence matrix.  Must be Nt long.
    sprsfmt : string, optional
        The sparse storage format to return. Options are:

        * 'coo' : (default) This is the native format of OpenPNMs data
        * 'lil' : Enables row-wise slice of data
        * 'csr' : Favored by most linear algebra routines
    dropzeros : Boolean, optional
        Omit zero-weighted throats instead of storing explicit zeros
        (default True).

    Returns
    -------
    An incidence matrix (a cousin to the adjacency matrix, useful for
    finding throats of given a pore)

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> vals = sp.rand(pn.num_throats(),) < 0.5
    >>> temp = pn.create_incidence_matrix(data=vals,sprsfmt='csr')
    """
    logger.debug('create_incidence_matrix: Start of method')
    Nt = self.num_throats()
    Np = self.num_pores()
    # Default to a pure connectivity matrix; otherwise validate length.
    if data is None:
        data = sp.ones((self.num_throats(),))
    elif sp.shape(data)[0] != Nt:
        raise Exception('Received dataset of incorrect length')
    keep = data > 0 if dropzeros else sp.ones_like(data, dtype=bool)
    conn = self['throat.conns'][keep]
    # Each throat contributes two entries, one per endpoint pore.
    rows = sp.append(conn[:, 0], conn[:, 1])
    Tids = self.throats('all')[keep]
    cols = sp.append(Tids, Tids)
    weights = sp.append(data[keep], data[keep])
    mat = sprs.coo_matrix((weights, (rows, cols)), (Np, Nt))
    # Convert from the native 'coo' assembly format if requested.
    if sprsfmt == 'csr':
        mat = mat.tocsr()
    elif sprsfmt == 'lil':
        mat = mat.tolil()
    logger.debug('create_incidence_matrix: End of method')
    return mat
def find_connected_pores(self, throats=[], flatten=False):
    r"""
    Return a list of pores connected to the given list of throats

    Parameters
    ----------
    throats : array_like
        List of throats numbers
    flatten : boolean, optional
        If flatten is True (default) a 1D array of unique pore numbers
        is returned. If flatten is False each location in the the returned
        array contains a sub-arras of neighboring pores for each input
        throat, in the order they were sent.

    Returns
    -------
    1D array (if flatten is True) or ndarray of arrays (if flatten is False)

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.find_connected_pores(throats=[0,1])
    array([[0, 1],
           [0, 5]])
    >>> pn.find_connected_pores(throats=[0,1], flatten=True)
    array([0, 1, 5])
    """
    Ts = sp.array(throats, ndmin=1)
    # Accept a boolean mask as well as explicit indices.
    if Ts.dtype == bool:
        Ts = self.toindices(Ts)
    if sp.size(Ts) == 0:
        # Empty selection: return an empty (0, 2) array.
        return sp.ndarray([0, 2], dtype=int)
    pore_pairs = self['throat.conns'][Ts]
    if not flatten:
        return pore_pairs
    return sp.unique(sp.hstack(pore_pairs))
def find_connecting_throat(self, P1, P2):
    r"""
    Return the throat number connecting pairs of pores

    Parameters
    ----------
    P1 , P2 : array_like
        The pore numbers whose throats are sought. These can be vectors
        of pore numbers, but must be the same length

    Returns
    -------
    Tnum : list of list of int
        Returns throat number(s), or empty array if pores are not connected

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.find_connecting_throat([0, 1, 2], [2, 2, 2])
    [[], [3], []]

    TODO: This now works on 'vector' inputs, but is not actually vectorized
    in the Numpy sense, so could be slow with large P1,P2 inputs
    """
    P1 = sp.array(P1, ndmin=1)
    P2 = sp.array(P2, ndmin=1)
    Ts1 = self.find_neighbor_throats(P1, flatten=False)
    Ts2 = self.find_neighbor_throats(P2, flatten=False)
    Ts = []
    for row in range(0, len(P1)):
        if P1[row] == P2[row]:
            # A pore is not considered connected to itself.
            throat = []
        else:
            # Throats common to both pores' neighbor sets.
            throat = sp.intersect1d(Ts1[row], Ts2[row]).tolist()
        # Fixed: the original built the list reversed with insert(0, ...)
        # and then called reverse(), which is O(n**2) overall; a plain
        # append preserves input order directly with identical results.
        Ts.append(throat)
    return Ts
def find_neighbor_pores(self, pores, mode='union', flatten=True, excl_self=True):
    r"""
    Returns a list of pores neighboring the given pore(s)

    Parameters
    ----------
    pores : array_like
        ID numbers of pores whose neighbors are sought.
    flatten : boolean, optional
        If flatten is True a 1D array of unique pore ID numbers is
        returned. If flatten is False the returned array contains arrays
        of neighboring pores for each input pore, in the order they were
        sent.
    excl_self : bool, optional (Default is True)
        If this is True then the input pores are not included in the
        returned list. This option only applies when input pores
        are in fact neighbors to each other, otherwise they are not
        part of the returned list anyway.
    mode : string, optional
        Specifies which neighbors should be returned. The options are:

        * 'union' : All neighbors of the input pores
        * 'intersection' : Only neighbors shared by all input pores
        * 'not_intersection' : Only neighbors not shared by any input pores

    Returns
    -------
    neighborPs : 1D array (if flatten is True) or ndarray of ndarrays (if
    flatten if False)

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.find_neighbor_pores(pores=[0, 2])
    array([ 1,  3,  5,  7, 25, 27])
    >>> pn.find_neighbor_pores(pores=[0, 1])
    array([ 2,  5,  6, 25, 26])
    >>> pn.find_neighbor_pores(pores=[0, 1], mode='union', excl_self=False)
    array([ 0,  1,  2,  5,  6, 25, 26])
    >>> pn.find_neighbor_pores(pores=[0, 2],flatten=False)
    array([array([ 1,  5, 25]), array([ 1,  3,  7, 27])], dtype=object)
    >>> pn.find_neighbor_pores(pores=[0, 2],mode='intersection')
    array([1])
    >>> pn.find_neighbor_pores(pores=[0, 2],mode='not_intersection')
    array([ 3,  5,  7, 25, 27])
    """
    pores = sp.array(pores, ndmin=1)
    if pores.dtype == bool:
        pores = self.toindices(pores)
    if sp.size(pores) == 0:
        return sp.array([], ndmin=1, dtype=int)
    # Use the cached 'lil' adjacency matrix, building it on first use.
    # Fixed: the original bare `except:` also swallowed unrelated errors;
    # only a missing cache entry (KeyError) or a cleared placeholder dict
    # without a .rows attribute (AttributeError) should trigger a rebuild.
    try:
        neighborPs = self._adjacency_matrix['lil'].rows[[pores]]
    except (KeyError, AttributeError):
        temp = self.create_adjacency_matrix(sprsfmt='lil')
        self._adjacency_matrix['lil'] = temp
        neighborPs = self._adjacency_matrix['lil'].rows[[pores]]
    if [sp.asarray(x) for x in neighborPs if x] == []:
        return sp.array([], ndmin=1)
    if flatten:
        # All the empty lists must be removed to maintain data type after
        # hstack (numpy bug?)
        neighborPs = [sp.asarray(x) for x in neighborPs if x]
        neighborPs = sp.hstack(neighborPs)
        neighborPs = sp.concatenate((neighborPs, pores))
        # Remove references to input pores and duplicates
        if mode == 'not_intersection':
            neighborPs = sp.array(sp.unique(sp.where(
                sp.bincount(neighborPs) == 1)[0]), dtype=int)
        elif mode == 'union':
            neighborPs = sp.array(sp.unique(neighborPs), int)
        elif mode == 'intersection':
            neighborPs = sp.array(sp.unique(sp.where(
                sp.bincount(neighborPs) > 1)[0]), dtype=int)
        if excl_self:
            neighborPs = neighborPs[~sp.in1d(neighborPs, pores)]
    else:
        # Return one integer array of neighbors per input pore.
        for i in range(0, sp.size(pores)):
            neighborPs[i] = sp.array(neighborPs[i], dtype=int)
    return sp.array(neighborPs, ndmin=1)
def find_neighbor_throats(self, pores, mode='union', flatten=True):
    r"""
    Returns a list of throats neighboring the given pore(s)

    Parameters
    ----------
    pores : array_like
        Indices of pores whose neighbors are sought
    flatten : boolean, optional
        If flatten is True (default) a 1D array of unique throat ID numbers
        is returned. If flatten is False the returned array contains arrays
        of neighboring throat ID numbers for each input pore, in the order
        they were sent.
    mode : string, optional
        Specifies which neighbors should be returned. The options are:

        * 'union' : All neighbors of the input pores
        * 'intersection' : Only neighbors shared by all input pores
        * 'not_intersection' : Only neighbors not shared by any input pores

    Returns
    -------
    neighborTs : 1D array (if flatten is True) or ndarray of arrays (if
    flatten if False)

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.find_neighbor_throats(pores=[0, 1])
    array([0, 1, 2, 3, 4, 5])
    >>> pn.find_neighbor_throats(pores=[0, 1],flatten=False)
    array([array([0, 1, 2]), array([0, 3, 4, 5])], dtype=object)
    """
    pores = sp.array(pores, ndmin=1)
    if pores.dtype == bool:
        pores = self.toindices(pores)
    if sp.size(pores) == 0:
        return sp.array([], ndmin=1, dtype=int)
    # Use the cached 'lil' incidence matrix, building it on first use.
    # Fixed: the original bare `except:` also swallowed unrelated errors;
    # only a missing cache entry (KeyError) or a cleared placeholder dict
    # without a .rows attribute (AttributeError) should trigger a rebuild.
    try:
        neighborTs = self._incidence_matrix['lil'].rows[[pores]]
    except (KeyError, AttributeError):
        temp = self.create_incidence_matrix(sprsfmt='lil')
        self._incidence_matrix['lil'] = temp
        neighborTs = self._incidence_matrix['lil'].rows[[pores]]
    if [sp.asarray(x) for x in neighborTs if x] == []:
        return sp.array([], ndmin=1)
    if flatten:
        # All the empty lists must be removed to maintain data type after
        # hstack (numpy bug?)
        neighborTs = [sp.asarray(x) for x in neighborTs if x]
        neighborTs = sp.hstack(neighborTs)
        # Remove references to input pores and duplicates
        if mode == 'not_intersection':
            neighborTs = sp.unique(sp.where(sp.bincount(neighborTs) == 1)[0])
        elif mode == 'union':
            neighborTs = sp.unique(neighborTs)
        elif mode == 'intersection':
            neighborTs = sp.unique(sp.where(sp.bincount(neighborTs) > 1)[0])
    else:
        # Return one array of throat indices per input pore.
        for i in range(0, sp.size(pores)):
            neighborTs[i] = sp.array(neighborTs[i])
    return sp.array(neighborTs, ndmin=1)
def num_neighbors(self, pores, flatten=False):
    r"""
    Returns an ndarray containing the number of neigbhor pores for each
    element in pores

    Parameters
    ----------
    pores : array_like
        Pores whose neighbors are to be counted
    flatten : boolean (optional)
        If False (default) the number pore neighbors for each input are
        returned as an array. If True the sum total number of unique
        neighbors is counted, not including the input pores even if they
        neighbor each other.

    Returns
    -------
    num_neighbors : 1D array with number of neighbors in each element

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.num_neighbors(pores=[0, 1], flatten=False)
    array([3, 4])
    >>> pn.num_neighbors(pores=[0, 1], flatten=True)
    5
    >>> pn.num_neighbors(pores=[0, 2], flatten=True)
    6
    """
    pores = sp.array(pores, ndmin=1)
    # Accept a boolean mask as well as explicit indices.
    if pores.dtype == bool:
        pores = self.toindices(pores)
    if sp.size(pores) == 0:
        return sp.array([], ndmin=1, dtype=int)
    if flatten:
        # Total count of unique neighbors, excluding the inputs themselves.
        nearby = self.find_neighbor_pores(pores,
                                          flatten=True,
                                          mode='union',
                                          excl_self=True)
        return sp.shape(nearby)[0]
    # Per-pore neighbor counts, in input order.
    nearby = self.find_neighbor_pores(pores, flatten=False)
    return sp.array([sp.size(row) for row in nearby], dtype=int)
def find_interface_throats(self, labels=[]):
    r"""
    Finds the throats that join two pore labels.

    Parameters
    ----------
    labels : list of strings
        The labels of the two pore groups whose interface is sought

    Returns
    -------
    An array of throat numbers that connect the given pore groups

    Notes
    -----
    This method is meant to find interfaces between TWO groups, regions or
    clusters of pores (as defined by their label). If the input labels
    overlap or are not adjacent, an empty array is returned.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn['pore.domain1'] = False
    >>> pn['pore.domain2'] = False
    >>> pn['pore.domain1'][[0, 1, 2]] = True
    >>> pn['pore.domain2'][[5, 6, 7]] = True
    >>> pn.find_interface_throats(labels=['domain1', 'domain2'])
    array([1, 4, 7])

    TODO: It might be a good idea to allow overlapping regions
    """
    Tind = sp.array([], ndmin=1)
    # Guard clauses replace the original nested if/else; both error paths
    # still log and return the empty array rather than raising.
    if sp.shape(labels)[0] != 2:
        logger.error('Exactly two labels must be given')
        return Tind
    P1 = self.pores(labels=labels[0])
    P2 = self.pores(labels=labels[1])
    # Overlapping label sets make the interface ill-defined.
    if sp.sum(sp.in1d(P1, P2)) > 0:
        # Fixed typo in the original message: 'iterface' -> 'interface'.
        logger.error('Some labels overlap, interface cannot be found')
        return Tind
    # The interface is the set of throats adjacent to both groups.
    T1 = self.find_neighbor_throats(P1)
    T2 = self.find_neighbor_throats(P2)
    Tmask = sp.in1d(T1, T2)
    Tind = T1[Tmask]
    return Tind
def find_clusters(self, mask=[]):
    r"""
    Identify connected clusters of pores in the network.

    Parameters
    ----------
    mask : array_like, boolean
        A list of active nodes.  This method will automatically search
        for clusters based on site or bond connectivity depending on
        wheather the received mask is Np or Nt long.

    Returns
    -------
    clusters : array_like
        An Np long list of clusters numbers
    """
    n_throats = self.num_throats()
    if sp.size(mask) == n_throats:
        # Throat (bond) mask: normalize to boolean form.
        active = sp.zeros((n_throats,), dtype=bool)
        active[mask] = True
    elif sp.size(mask) == self.num_pores():
        # Pore (site) mask: a throat is active only when both of its
        # endpoint pores are active.
        endpoints = self.find_connected_pores(throats=self.throats())
        endpoints[:, 0] = mask[endpoints[:, 0]]
        endpoints[:, 1] = mask[endpoints[:, 1]]
        active = sp.array(endpoints[:, 0]*endpoints[:, 1], dtype=bool)
    else:
        raise Exception('Mask received was neither Nt nor Np long')
    adj = self.create_adjacency_matrix(data=active,
                                       sprsfmt='csr',
                                       dropzeros=True)
    return sprs.csgraph.connected_components(csgraph=adj,
                                             directed=False)[1]
def find_clusters2(self, mask=[], t_labels=False):
    r"""
    Identify connected clusters of pores in the network.  This method can
    also return a list of throat labels, which correspond to the pore
    labels to which the throat is connected.  Either site and bond
    percolation can be consider, see description of input arguments for
    details.

    Parameters
    ----------
    mask : array_like, boolean
        A list of active bonds or sites (throats or pores).  If the mask is
        Np long, then the method will perform a site percolation, while if
        the mask is Nt long bond percolation will be performed.
    t_labels : boolean (default id False)
        Indicates if throat cluster labels should also be returned. If true
        then a tuple containing both p_clusters and t_clusters is returned.

    Returns
    -------
    A Np long list of pore clusters numbers, unless t_labels is True in
    which case a tuple containing both pore and throat cluster labels is
    returned.  The label numbers correspond such that pores and throats
    with the same label are part of the same cluster.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.Cubic(shape=[25, 25, 1])
    >>> geom = OpenPNM.Geometry.GenericGeometry(network=pn,
    ...                                         pores=pn.Ps,
    ...                                         throats=pn.Ts)
    >>> geom['pore.seed'] = sp.rand(pn.Np)
    >>> geom['throat.seed'] = sp.rand(pn.Nt)

    Bond percolation is achieved by sending a list of invaded throats:

    >>> (p_bond,t_bond) = pn.find_clusters2(mask=geom['throat.seed'] < 0.3,
    ...                                     t_labels=True)

    Site percolation is achieved by sending a list of invaded pores:

    >>> (p_site,t_site) = pn.find_clusters2(mask=geom['pore.seed'] < 0.3,
    ...                                     t_labels=True)

    To visualize the invasion pattern, use matplotlib's matshow method
    along with the Cubic Network's asarray method which converts list based
    data to square arrays:

    .. code-block:: python

        import matplotlib.pyplot as plt
        im_bond = pn.asarray(p_bond)[:, :, 0]
        im_site = pn.asarray(p_site)[:, :, 0]
        plt.subplot(1, 2, 1)
        plt.imshow(im_site, interpolation='none')
        plt.subplot(1, 2, 2)
        plt.imshow(im_bond, interpolation='none')
    """
    # Validate the mask and dispatch on its length.
    mask = sp.array(mask, ndmin=1)
    if mask.dtype != bool:
        raise Exception('Mask must be a boolean array of Np or Nt length')
    if sp.size(mask) == self.Np:
        # Np-long mask: site percolation.
        labels = self._site_percolation(mask)
    elif sp.size(mask) == self.Nt:
        # Nt-long mask: bond percolation.
        labels = self._bond_percolation(mask)
    else:
        raise Exception('Mask received was neither Nt nor Np long')
    # labels is the (p_clusters, t_clusters) tuple from the helper.
    return labels if t_labels else labels[0]
def _site_percolation(self, pmask):
    r"""Cluster the network under site percolation.

    Given a boolean pore (site) mask, returns a tuple of
    (p_clusters, t_clusters) label arrays, with -1 marking pores and
    throats that are not part of any invaded cluster.
    """
    # Find throats that produce site percolation: a throat conducts only
    # when BOTH of its endpoint pores are in the mask.
    conns = sp.copy(self['throat.conns'])
    conns[:, 0] = pmask[conns[:, 0]]
    conns[:, 1] = pmask[conns[:, 1]]
    # Only if both pores are True is the throat set to True
    tmask = sp.all(conns, axis=1)
    # Perform the clustering using scipy.csgraph
    csr = self.create_adjacency_matrix(data=tmask,
                                       sprsfmt='csr',
                                       dropzeros=True)
    clusters = sprs.csgraph.connected_components(csgraph=csr,
                                                 directed=False)[1]
    # Adjust cluster numbers such that non-invaded pores are labelled -1
    # Note: The following line also takes care of assigning cluster numbers
    # to single isolated invaded pores
    p_clusters = (clusters + 1)*(pmask) - 1
    # Label invaded throats with their neighboring pore's label; a throat
    # belongs to a cluster only when both endpoints carry the same label.
    t_clusters = clusters[self['throat.conns']]
    ind = (t_clusters[:, 0] == t_clusters[:, 1])
    t_clusters = t_clusters[:, 0]
    # Label non-invaded (cross-cluster) throats with -1
    t_clusters[~ind] = -1
    return (p_clusters, t_clusters)
def _bond_percolation(self, tmask):
    r"""Cluster the network under bond percolation.

    Given a boolean throat (bond) mask, returns a tuple of
    (p_clusters, t_clusters) label arrays, with -1 marking pores and
    throats not touched by any invaded throat.
    """
    # Perform the clustering using scipy.csgraph
    csr = self.create_adjacency_matrix(data=tmask,
                                       sprsfmt='csr',
                                       dropzeros=True)
    clusters = sprs.csgraph.connected_components(csgraph=csr,
                                                 directed=False)[1]
    # Convert clusters to a more usable output:
    # Find pores attached to each invaded throat
    Ps = self.find_connected_pores(throats=tmask, flatten=True)
    # Adjust cluster numbers such that non-invaded pores are labelled -1
    p_clusters = (clusters + 1)*(self.tomask(pores=Ps).astype(int)) - 1
    # Label invaded throats with their neighboring pore's label
    t_clusters = clusters[self['throat.conns']][:, 0]
    # Label non-invaded throats with -1
    t_clusters[~tmask] = -1
    return (p_clusters, t_clusters)
def find_nearby_pores(self, pores, distance, flatten=False, excl_self=True):
    r"""
    Find all pores within a given radial distance of the input pore(s)
    regardless of whether or not they are toplogically connected.

    Parameters
    ----------
    pores : array_like
        The list of pores for whom nearby neighbors are to be found
    distance : scalar
        The maximum distance within which the nearby should be found
    excl_self : bool
        Controls whether the input pores should be included in the returned
        list.  The default is True which means they are not included.
    flatten : bool
        If True a single flat array of unique nearby pores is returned;
        otherwise one array per input pore, in input order.

    Returns
    -------
    A list of pores which are within the given spatial distance.  If a
    list of N pores is supplied, then a an N-long list of such lists is
    returned.  The returned lists each contain the pore for which the
    neighbors were sought.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.TestNet()
    >>> pn.find_nearby_pores(pores=[0, 1], distance=1)
    array([array([ 1,  5, 25]), array([ 0,  2,  6, 26])], dtype=object)
    >>> pn.find_nearby_pores(pores=[0, 1], distance=0.5)
    array([], shape=(2, 0), dtype=int64)
    """
    # Convert to ND-array
    pores = sp.array(pores, ndmin=1)
    # Convert boolean mask to indices if necessary
    if pores.dtype == bool:
        pores = self.Ps[pores]
    # Handle an empty array if given
    if sp.size(pores) == 0:
        return sp.array([], dtype=sp.int64)
    if distance <= 0:
        logger.error('Provided distances should be greater than 0')
        # Return empty structures shaped to match the flatten option.
        if flatten:
            Pn = sp.array([])
        else:
            Pn = sp.array([sp.array([]) for i in range(0, len(pores))])
        return Pn.astype(sp.int64)
    # Create kdTree objects
    kd = sptl.cKDTree(self['pore.coords'])
    kd_pores = sptl.cKDTree(self['pore.coords'][pores])
    # Perform search
    Pn = kd_pores.query_ball_tree(kd, r=distance)
    # Sort the indices in each list
    [Pn[i].sort() for i in range(0, sp.size(pores))]
    if flatten:  # Convert list of lists to a flat nd-array
        temp = []
        [temp.extend(Ps) for Ps in Pn]
        Pn = sp.unique(temp)
        if excl_self:  # Remove inputs if necessary
            Pn = Pn[~sp.in1d(Pn, pores)]
    else:  # Convert list of lists to an nd-array of nd-arrays
        if excl_self:  # Remove inputs if necessary
            # Each pore lies within its own search radius (distance 0),
            # so list.remove drops exactly that self-entry.
            [Pn[i].remove(pores[i]) for i in range(0, sp.size(pores))]
        temp = []
        [temp.append(sp.array(Pn[i])) for i in range(0, sp.size(pores))]
        Pn = sp.array(temp)
        # An all-empty result comes back as float; normalize to int64.
        if Pn.dtype == float:
            Pn = Pn.astype(sp.int64)
    return Pn
def extend(self, pore_coords=[], throat_conns=[], labels=[]):
    # Thin wrapper around OpenPNM.Utilities.topology.extend
    topo.extend(network=self, pore_coords=pore_coords,
                throat_conns=throat_conns, labels=labels)
extend.__doc__ = topo.extend.__doc__

def trim(self, pores=[], throats=[]):
    # Thin wrapper around OpenPNM.Utilities.topology.trim
    topo.trim(network=self, pores=pores, throats=throats)
trim.__doc__ = topo.trim.__doc__

def clone_pores(self, pores, apply_label=['clone'], mode='parents'):
    # Thin wrapper around OpenPNM.Utilities.topology.clone_pores
    topo.clone_pores(network=self, pores=pores,
                     apply_label=apply_label, mode=mode)
clone_pores.__doc__ = topo.clone_pores.__doc__

def stitch(self, donor, P_donor, P_network, method, len_max=sp.inf,
           label_suffix=''):
    # Thin wrapper around OpenPNM.Utilities.topology.stitch
    topo.stitch(network=self, donor=donor, P_donor=P_donor,
                P_network=P_network, method=method, len_max=len_max,
                label_suffix=label_suffix)
stitch.__doc__ = topo.stitch.__doc__

def connect_pores(self, pores1, pores2, labels=[]):
    # Thin wrapper around OpenPNM.Utilities.topology.connect_pores
    topo.connect_pores(network=self,
                       pores1=pores1,
                       pores2=pores2,
                       labels=labels)
connect_pores.__doc__ = topo.connect_pores.__doc__
def check_network_health(self):
    r"""
    This method check the network topological health by checking for:

        (1) Isolated pores
        (2) Islands or isolated clusters of pores
        (3) Duplicate throats
        (4) Bidirectional throats (ie. symmetrical adjacency matrix)

    Returns
    -------
    A dictionary containing the offending pores or throat numbers under
    each named key.

    It also returns a list of which pores and throats should be trimmed
    from the network to restore health.  This list is a suggestion only,
    and is based on keeping the largest cluster and trimming the others.

    Notes
    -----
    - Does not yet check for duplicate pores
    - Does not yet suggest which throats to remove
    - This is just a 'check' method and does not 'fix' the problems it finds
    """
    health = Tools.HealthDict()
    health['disconnected_clusters'] = []
    health['isolated_pores'] = []
    health['trim_pores'] = []
    health['duplicate_throats'] = []
    health['bidirectional_throats'] = []
    # Check for individual isolated pores
    Ps = self.num_neighbors(self.pores())
    if sp.sum(Ps == 0) > 0:
        logger.warning(str(sp.sum(Ps == 0)) + ' pores have no neighbors')
        health['isolated_pores'] = sp.where(Ps == 0)[0]
    # Check for separated clusters of pores
    temp = []
    Cs = self.find_clusters(self.tomask(throats=self.throats('all')))
    if sp.shape(sp.unique(Cs))[0] > 1:
        logger.warning('Isolated clusters exist in the network')
        for i in sp.unique(Cs):
            temp.append(sp.where(Cs == i)[0])
        # Sort clusters by size, largest first: keep the largest and
        # suggest trimming all the others.
        b = sp.array([len(item) for item in temp])
        c = sp.argsort(b)[::-1]
        for i in range(0, len(c)):
            health['disconnected_clusters'].append(temp[c[i]])
            if i > 0:
                health['trim_pores'].extend(temp[c[i]])
    # Check for duplicate throats: summing entries into a lil matrix
    # accumulates parallel throats, so any entry > 1 marks a duplicate.
    i = self['throat.conns'][:, 0]
    j = self['throat.conns'][:, 1]
    v = sp.array(self['throat.all'], dtype=int)
    Np = self.num_pores()
    adjmat = sprs.coo_matrix((v, (i, j)), [Np, Np])
    temp = adjmat.tolil()  # Convert to lil to combine duplicates
    # Compile lists of which specfic throats are duplicates
    # Be VERY careful here, as throats are not in order
    mergeTs = []
    for i in range(0, self.Np):
        if sp.any(sp.array(temp.data[i]) > 1):
            ind = sp.where(sp.array(temp.data[i]) > 1)[0]
            P = sp.array(temp.rows[i])[ind]
            Ts = self.find_connecting_throat(P1=i, P2=P)[0]
            mergeTs.append(Ts)
    health['duplicate_throats'] = mergeTs
    # Check for bidirectional throats: entries below the diagonal mean
    # some throat is stored with conns[:, 0] > conns[:, 1].
    num_full = adjmat.sum()
    temp = sprs.triu(adjmat, k=1)
    num_upper = temp.sum()
    if num_full > num_upper:
        biTs = sp.where(self['throat.conns'][:, 0] >
                        self['throat.conns'][:, 1])[0]
        health['bidirectional_throats'] = biTs.tolist()
    return health
def check_geometry_health(self):
    r"""
    Perform a check to find pores with overlapping or undefined Geometries

    Returns
    -------
    A HealthDict with lists of pore/throat indices that are claimed by
    more than one Geometry ('overlapping_*') or by none ('undefined_*').
    """
    # Count how many Geometries claim each pore and each throat
    pore_counts = sp.zeros((self.Np,))
    throat_counts = sp.zeros((self.Nt,))
    for geom in self.geometries():
        pore_counts[self['pore.' + geom]] += 1
        throat_counts[self['throat.' + geom]] += 1
    health = Tools.HealthDict()
    health['overlapping_pores'] = sp.where(pore_counts > 1)[0].tolist()
    health['undefined_pores'] = sp.where(pore_counts == 0)[0].tolist()
    health['overlapping_throats'] = sp.where(throat_counts > 1)[0].tolist()
    health['undefined_throats'] = sp.where(throat_counts == 0)[0].tolist()
    return health
def _update_network(self, mode='clear'):
    r"""
    Regenerates the adjacency and incidence matrices

    Parameters
    ----------
    mode : string
        Controls the extent of the update.  Options are:

        - 'clear' : Removes existing adjacency and incidence matrices
        - 'regenerate' : Removes the existing matrices and regenerates
          new ones.

    Notes
    -----
    The 'regenerate' mode is more time consuming, so repeated calls to
    this function (ie. during network merges, and adding boundaries)
    should use the 'clear' mode.  The other methods that require these
    matrices will generate them as needed, so this pushes the 'generation'
    time to 'on demand'.
    """
    logger.debug('Resetting adjacency and incidence matrices')
    # The three sparse formats are always kept in sync; handle them
    # uniformly instead of spelling out each one (the original repeated
    # this block six times).
    for fmt in ('coo', 'csr', 'lil'):
        self._adjacency_matrix[fmt] = {}
        self._incidence_matrix[fmt] = {}
    if mode == 'regenerate':
        for fmt in ('coo', 'csr', 'lil'):
            self._adjacency_matrix[fmt] = \
                self.create_adjacency_matrix(sprsfmt=fmt)
            self._incidence_matrix[fmt] = \
                self.create_incidence_matrix(sprsfmt=fmt)
def domain_bulk_volume(self):
    r"""
    Not implemented on this class; subclasses are expected to override it.
    """
    raise NotImplementedError()
def domain_pore_volume(self):
    r"""
    Not implemented on this class; subclasses are expected to override it.
    """
    raise NotImplementedError()
def domain_length(self, face_1, face_2):
    r"""
    Calculate the distance between two faces of the network

    Parameters
    ----------
    face_1 and face_2 : array_like
        Lists of pores belonging to opposite faces of the network

    Returns
    -------
    The length of the domain in the specified direction

    Notes
    -----
    - Does not yet check if input faces are perpendicular to each other
    """
    # Ensure given points are coplanar before proceeding
    if misc.iscoplanar(self['pore.coords'][face_1]) and \
            misc.iscoplanar(self['pore.coords'][face_2]):
        # Find distance between given faces: median of the per-pore
        # nearest-neighbor distances is robust to outliers
        x = self['pore.coords'][face_1]
        y = self['pore.coords'][face_2]
        Ds = misc.dist(x, y)
        L = sp.median(sp.amin(Ds, axis=0))
    else:
        # BUGFIX: the original message used a backslash line-continuation
        # *inside* the string literal, embedding a long run of spaces in
        # the logged text; use implicit concatenation instead.
        logger.warning('The supplied pores are not coplanar. Length will '
                       'be approximate.')
        f1 = self['pore.coords'][face_1]
        f2 = self['pore.coords'][face_2]
        # Component-wise distance between the two face centroids; the
        # largest component approximates the domain length
        distavg = [0, 0, 0]
        distavg[0] = sp.absolute(sp.average(f1[:, 0]) - sp.average(f2[:, 0]))
        distavg[1] = sp.absolute(sp.average(f1[:, 1]) - sp.average(f2[:, 1]))
        distavg[2] = sp.absolute(sp.average(f1[:, 2]) - sp.average(f2[:, 2]))
        L = max(distavg)
    return L
def domain_area(self, face):
    r"""
    Calculate the area of a given network face

    Parameters
    ----------
    face : array_like
        List of pores of pore defining the face of interest

    Returns
    -------
    The area of the specified face
    """
    coords = self['pore.coords'][face]
    rads = self['pore.diameter'][face] / 2.
    # Calculate the area of the 3 principle faces of the bounding cuboid
    dx = max(coords[:, 0] + rads) - min(coords[:, 0] - rads)
    dy = max(coords[:, 1] + rads) - min(coords[:, 1] - rads)
    dz = max(coords[:, 2] + rads) - min(coords[:, 2] - rads)
    yz = dy*dz  # x normal
    xz = dx*dz  # y normal
    xy = dx*dy  # z normal
    # Find the directions parallel to the plane.  BUGFIX: the comparison
    # must be element-wise; a plain Python list compared to a scalar with
    # ``!=`` is always True, which made sp.where return [0] regardless of
    # the actual face orientation.
    areas = sp.array([yz, xz, xy])
    directions = sp.where(areas != max([yz, xz, xy]))[0]
    try:
        # Use the whole network to do the area calculation
        coords = self['pore.coords']
        rads = self['pore.diameter']/2.
        d0 = max(coords[:, directions[0]] + rads) - \
            min(coords[:, directions[0]] - rads)
        d1 = max(coords[:, directions[1]] + rads) - \
            min(coords[:, directions[1]] - rads)
        A = d0*d1
    except Exception:
        # If that fails (e.g. a degenerate face leaves fewer than two
        # parallel directions), use the max face area of the bounding
        # cuboid.  Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        A = max([yz, xz, xy])
    if not misc.iscoplanar(self['pore.coords'][face]):
        # BUGFIX: implicit string concatenation was missing a space
        # ("will beapproximate")
        logger.warning('The supplied pores are not coplanar. Area will be '
                       'approximate')
    return A
def _compress_labels(self, label_array):
# Make cluster number contiguous
array = sp.array(label_array)
if array.dtype != int:
raise Exception('label_array must be intergers')
min_val = sp.amin(array)
if min_val >= 0:
min_val = 0
array = array + sp.absolute(min_val)
nums = sp.unique(array)
temp = sp.zeros((sp.amax(array)+1,))
temp[nums] = sp.arange(0, sp.size(nums))
array = temp[array].astype(array.dtype)
return array
| mit |
julienr/vispy | vispy/color/colormap.py | 13 | 38233 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
    """Return the (left color, right color, relative position) triple
    needed to linearly interpolate each value of ``x`` within the bins
    delimited by ``controls``."""
    x = x.ravel()
    n = len(colors)
    # For each element in x, the control index of its bin's left boundary.
    x_step = _find_controls(x, controls, n-2)
    # The length of each bin.
    controls_length = np.diff(controls).astype(np.float32)
    # Prevent division by zero error.
    controls_length[controls_length == 0.] = 1.
    # Like x, but relative to each bin.
    _to_clip = x - controls[x_step]
    _to_clip /= controls_length[x_step]
    x_rel = np.clip(_to_clip, 0., 1.)
    # x_rel is returned as a column so it broadcasts against (n, 4) colors.
    return (colors[x_step],
            colors[x_step + 1],
            x_rel[:, None])
def mix(colors, x, controls=None):
    """Linearly interpolate within the color bins defined by ``controls``
    and blend the two bounding colors of each value in ``x``."""
    a, b, x_rel = _interpolate_multi(colors, x, controls)
    return _mix_simple(a, b, x_rel)
def smoothstep(edge0, edge1, x):
    """ performs smooth Hermite interpolation
    between 0 and 1 when edge0 < x < edge1. """
    # Scale, bias and saturate x to 0..1 range
    t = (x - edge0) / (edge1 - edge0)
    t = np.clip(t, 0.0, 1.0)
    # Evaluate the Hermite polynomial 3t^2 - 2t^3
    return t * t * (3 - 2 * t)
def step(colors, x, controls=None):
    """Step interpolation from a set of colors. x belongs in [0, 1].

    Parameters
    ----------
    colors : ndarray
        The control colors, one per bin (shape (ncolors, 4)).
    x : ndarray
        The values to map; flattened before lookup.
    controls : array-like
        Increasing bin edges; the first must be 0. and the last 1.
        There is one more edge than there are colors.
    """
    # BUGFIX: the docstring above used to sit *after* the first statement,
    # where it was a dead string expression rather than a docstring.
    x = x.ravel()
    assert (controls[0], controls[-1]) == (0., 1.)
    ncolors = len(colors)
    assert ncolors == len(controls) - 1
    assert ncolors >= 2
    x_step = _find_controls(x, controls, ncolors-1)
    return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
    """Generate a GLSL template function from a given interpolation patterns
    and control points."""
    assert (controls[0], controls[-1]) == (0., 1.)
    ncolors = len(controls)
    assert ncolors >= 2
    if ncolors == 2:
        # Trivial case: a single built-in mix() over the whole range.
        s = " return mix($color_0, $color_1, t);\n"
    else:
        s = ""
        # One if/else-if branch per bin; the final branch is a bare 'else'.
        for i in range(ncolors-1):
            if i == 0:
                ifs = 'if (t < %.6f)' % (controls[i+1])
            elif i == (ncolors-2):
                ifs = 'else'
            else:
                ifs = 'else if (t < %.6f)' % (controls[i+1])
            # Rescale t to [0, 1] relative to the current bin.
            adj_t = '(t - %s) / %s' % (controls[i],
                                       controls[i+1] - controls[i])
            s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
                  (ifs, i, i+1, adj_t))
    return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
class BaseColormap(object):
    """Class representing a colormap:

        t \in [0, 1] --> rgba_color

    Parameters
    ----------
    colors : list of lists, tuples, or ndarrays
        The control colors used by the colormap (shape = (ncolors, 4)).

    Notes
    -----
    Must be overriden. Child classes need to implement:

    glsl_map : string
        The GLSL function for the colormap. Use $color_0 to refer
        to the first color in `colors`, and so on. These are vec4 vectors.
    map(item) : function
        Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
        of size (N, 4).
    """

    # Control colors used by the colormap.
    colors = None

    # GLSL string with a function implementing the color map.
    glsl_map = None

    def __init__(self, colors=None):
        # Ensure the colors are arrays.
        if colors is not None:
            self.colors = colors
        if not isinstance(self.colors, ColorArray):
            self.colors = ColorArray(self.colors)
        # Process the GLSL map function by replacing $color_i by the
        # corresponding vec4 literal of each control color.
        if len(self.colors) > 0:
            self.glsl_map = _process_glsl_template(self.glsl_map,
                                                   self.colors.rgba)

    def map(self, item):
        """Return a rgba array for the requested items.

        This function must be overriden by child classes.

        This function doesn't need to implement argument checking on `item`.
        It can always assume that `item` is a (N, 1) array of values between
        0 and 1.

        Parameters
        ----------
        item : ndarray
            An array of values in [0,1].

        Returns
        -------
        rgba : ndarray
            An array with rgba values, with one color per item. The shape
            should be ``item.shape + (4,)``.

        Notes
        -----
        Users are expected to use a colormap with ``__getitem__()`` rather
        than ``map()`` (which implements a lower-level API).
        """
        raise NotImplementedError()

    def __getitem__(self, item):
        if isinstance(item, tuple):
            raise ValueError('ColorArray indexing is only allowed along '
                             'the first dimension.')
        # Ensure item is either a scalar or a column vector.
        item = _vector(item, type='column')
        # Clip the values in [0, 1].
        item = np.clip(item, 0., 1.)
        colors = self.map(item)
        return ColorArray(colors)

    def __setitem__(self, item, value):
        # Colormaps are read-only: control colors are fixed at construction.
        raise RuntimeError("It is not possible to set items to "
                           "BaseColormap instances.")

    def _repr_html_(self):
        # Render the colormap as a 100-cell single-row HTML table, used by
        # notebook front-ends for rich display.
        n = 100
        html = ("""
                <style>
                    table.vispy_colormap {
                        height: 30px;
                        border: 0;
                        margin: 0;
                        padding: 0;
                    }

                    table.vispy_colormap td {
                        width: 3px;
                        border: 0;
                        margin: 0;
                        padding: 0;
                    }
                </style>
                <table class="vispy_colormap">
                """ +
                '\n'.join([(("""<td style="background-color: %s;"
                             title="%s"></td>""") % (color, color))
                           for color in self[np.linspace(0., 1., n)].hex]) +
                """
                </table>
                """)
        return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
_interpolation_info = {
    'linear': {
        # One control point per color.
        'ncontrols': lambda ncolors: ncolors,  # take ncolors as argument
        'glsl_map': _glsl_mix,  # take 'controls' as argument
        'map': mix,
    },
    'zero': {
        # Controls are bin edges: one more edge than there are colors.
        'ncontrols': lambda ncolors: (ncolors+1),
        'glsl_map': _glsl_step,
        'map': step,
    }
}
class Colormap(BaseColormap):
    """A colormap defining several control colors and an interpolation scheme.

    Parameters
    ----------
    colors : list of colors | ColorArray
        The list of control colors. If not a ``ColorArray``, a new
        ``ColorArray`` instance is created from this list. See the
        documentation of ``ColorArray``.
    controls : array-like
        The list of control points for the given colors. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).

    Examples
    --------
    Here is a basic example:

        >>> from vispy.color import Colormap
        >>> cm = Colormap(['r', 'g', 'b'])
        >>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
    """

    def __init__(self, colors, controls=None, interpolation='linear'):
        # Setting the interpolation selects the ncontrols/GLSL/map helper
        # functions for the chosen mode (see the property setter below).
        self.interpolation = interpolation
        ncontrols = self._ncontrols(len(colors))
        # Default controls.
        if controls is None:
            controls = _default_controls(ncontrols)
        assert len(controls) == ncontrols
        self._controls = np.array(controls, dtype=np.float32)
        # Generate the GLSL template before the base class substitutes the
        # $color_i placeholders with the actual control colors.
        self.glsl_map = self._glsl_map_generator(self._controls)
        super(Colormap, self).__init__(colors)

    @property
    def interpolation(self):
        """The interpolation mode of the colormap"""
        return self._interpolation

    @interpolation.setter
    def interpolation(self, val):
        if val not in _interpolation_info:
            raise ValueError('The interpolation mode can only be one of: ' +
                             ', '.join(sorted(_interpolation_info.keys())))
        # Get the information of the interpolation mode.
        info = _interpolation_info[val]
        # Get the function that generates the GLSL map, as a function of the
        # controls array.
        self._glsl_map_generator = info['glsl_map']
        # Number of controls as a function of the number of colors.
        self._ncontrols = info['ncontrols']
        # Python map function.
        self._map_function = info['map']
        self._interpolation = val

    def map(self, x):
        """The Python mapping function from the [0,1] interval to a
        list of rgba colors

        Parameters
        ----------
        x : array-like
            The values to map.

        Returns
        -------
        colors : list
            List of rgba colors.
        """
        return self._map_function(self.colors.rgba, x, self._controls)
class CubeHelixColormap(Colormap):
    def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
                 minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
        """Cube helix colormap

        A full implementation of Dave Green's "cubehelix" for Matplotlib.
        Based on the FORTRAN 77 code provided in
        D.A. Green, 2011, BASI, 39, 289.

        http://adsabs.harvard.edu/abs/2011arXiv1108.5083G

        User can adjust all parameters of the cubehelix algorithm.
        This enables much greater flexibility in choosing color maps, while
        always ensuring the color map scales in intensity from black
        to white. A few simple examples:

        Default color map settings produce the standard "cubehelix".

        Create color map in only blues by setting rot=0 and start=0.

        Create reverse (white to black) backwards through the rainbow once
        by setting rot=1 and reverse=True.

        Parameters
        ----------
        start : scalar, optional
            Sets the starting position in the color space. 0=blue, 1=red,
            2=green. Defaults to 0.5.
        rot : scalar, optional
            The number of rotations through the rainbow. Can be positive
            or negative, indicating direction of rainbow. Negative values
            correspond to Blue->Red direction. Defaults to 1.
        gamma : scalar, optional
            The gamma correction for intensity. Defaults to 1.0
        reverse : boolean, optional
            Set to True to reverse the color map. Will go from black to
            white. Good for density plots where shade~density. Defaults to
            True.
        nlev : scalar, optional
            Defines the number of discrete levels to render colors at.
            Defaults to 32.
        sat : scalar, optional
            The saturation intensity factor. Defaults to 1.2
            NOTE: this was formerly known as "hue" parameter
        minSat : scalar, optional
            Sets the minimum-level saturation. Defaults to 1.2
        maxSat : scalar, optional
            Sets the maximum-level saturation. Defaults to 1.2
        startHue : scalar, optional
            Sets the starting color, ranging from [0, 360], as in
            D3 version by @mbostock
            NOTE: overrides values in start parameter
        endHue : scalar, optional
            Sets the ending color, ranging from [0, 360], as in
            D3 version by @mbostock
            NOTE: overrides values in rot parameter
        minLight : scalar, optional
            Sets the minimum lightness value. Defaults to 0.
        maxLight : scalar, optional
            Sets the maximum lightness value. Defaults to 1.
        """
        # Sample the cubehelix generator and hand the resulting list of
        # colors to the generic Colormap machinery.
        super(CubeHelixColormap, self).__init__(
            cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
                      nlev=nlev, minSat=minSat, maxSat=maxSat,
                      minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
    """White -> yellow -> red colormap with a quadratic blend of the
    yellow->red leg."""
    colors = [(1.0, 1.0, 1.0, 1.0),
              (1.0, 1.0, 0.0, 1.0),
              (1.0, 0.0, 0.0, 1.0)]

    glsl_map = """
    vec4 fire(float t) {
        return mix(mix($color_0, $color_1, t),
                   mix($color_1, $color_2, t*t), t);
    }
    """

    def map(self, t):
        # Mirror the GLSL above: blend color 0->1 linearly, color 1->2
        # with t squared, then mix the two results by t.
        a, b, d = self.colors.rgba
        c = _mix_simple(a, b, t)
        e = _mix_simple(b, d, t**2)
        return _mix_simple(c, e, t)
class _Grays(BaseColormap):
    """Linear grayscale colormap (r = g = b = t)."""
    glsl_map = """
    vec4 grays(float t) {
        return vec4(t, t, t, 1.0);
    }
    """

    def map(self, t):
        if isinstance(t, np.ndarray):
            # t arrives as a column vector (see BaseColormap.__getitem__),
            # so hstack produces (N, 4) rgba rows.
            return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
        else:
            return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
    """Blue-tinted ramp: r = g = t with blue and alpha fixed at 1."""
    glsl_map = """
    vec4 ice(float t) {
        return vec4(t, t, 1.0, 1.0);
    }
    """

    def map(self, t):
        if isinstance(t, np.ndarray):
            # Column-stack r and g (both t) with constant blue and alpha.
            return np.hstack([t, t, np.ones(t.shape),
                              np.ones(t.shape)]).astype(np.float32)
        else:
            return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
    """Colormap built by smoothstepping each RGB channel between two
    per-channel edge values."""
    colors = [(0., .33, .66, 1.0),
              (.33, .66, 1., 1.0)]

    glsl_map = """
    vec4 hot(float t) {
        return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
                    1.0);
    }
    """

    def map(self, t):
        rgba = self.colors.rgba
        # Hermite-interpolate each RGB channel between the two edge colors;
        # alpha is appended as a constant 1 column.
        smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
        return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
    """Blue -> green colormap, eased with sqrt(t)."""
    colors = [(0.0, 0.0, 1.0, 1.0),
              (0.0, 1.0, 0.5, 1.0)]

    glsl_map = """
    vec4 winter(float t) {
        return mix($color_0, $color_1, sqrt(t));
    }
    """

    def map(self, t):
        # sqrt(t) biases the blend toward the second color early on,
        # matching the GLSL above.
        return _mix_simple(self.colors.rgba[0],
                           self.colors.rgba[1],
                           np.sqrt(t))
class _SingleHue(Colormap):
    """A colormap which is solely defined by the given hue and value.

    Given the color hue and value, this color map increases the saturation
    of a color. The start color is almost white but still contains a hint of
    the given color, and at the end the color is fully saturated.

    Parameters
    ----------
    hue : scalar, optional
        The hue refers to a "true" color, without any shading or tinting.
        Must be in the range [0, 360]. Defaults to 200 (blue).
    saturation_range : array-like, optional
        The saturation represents how "pure" a color is. Less saturation means
        more white light mixed in the color. A fully saturated color means
        the pure color defined by the hue. No saturation means completely
        white. This colormap changes the saturation, and with this parameter
        you can specify the lower and upper bound. Default is (0.1, 0.8).
    value : scalar, optional
        The value defines the "brightness" of a color: a value of 0.0 means
        completely black while a value of 1.0 means the color defined by the
        hue without shading. Must be in the range [0, 1.0]. The default value
        is 1.0.

    Notes
    -----
    For more information about the hue values see the `wikipedia page`_.

    .. _wikipedia page: https://en.wikipedia.org/wiki/Hue
    """

    # NOTE: the default used to be a mutable list ([0.1, 0.8]); a tuple
    # avoids the shared-mutable-default pitfall.  The docstring also
    # wrongly claimed a default of [0.2, 0.8].
    def __init__(self, hue=200, saturation_range=(0.1, 0.8), value=1.0):
        colors = ColorArray([
            (hue, saturation_range[0], value),
            (hue, saturation_range[1], value)
        ], color_space='hsv')
        super(_SingleHue, self).__init__(colors)
class _HSL(Colormap):
    """A colormap which is defined by n evenly spaced points in
    a circular color space.

    This means that we change the hue value while keeping the
    saturation and value constant.

    Parameters
    ----------
    ncolors : int, optional
        The number of colors to generate.
    hue_start : int, optional
        The hue start value. Must be in the range [0, 360], the default is 0.
    saturation : float, optional
        The saturation component of the colors to generate. The default is
        fully saturated (1.0). Must be in the range [0, 1.0].
    value : float, optional
        The value (brightness) component of the colors to generate. Must
        be in the range [0, 1.0], and the default is 1.0
    controls : array-like, optional
        The list of control points for the colors to generate. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str, optional
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).
    """

    def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
                 controls=None, interpolation='linear'):
        # ncolors evenly spaced hues; drop the last point since 360 would
        # duplicate hue 0 on the circle.
        hues = np.linspace(0, 360, ncolors + 1)[:-1]
        hues += hue_start
        hues %= 360
        colors = ColorArray([(hue, saturation, value) for hue in hues],
                            color_space='hsv')
        super(_HSL, self).__init__(colors, controls=controls,
                                   interpolation=interpolation)
class _HUSL(Colormap):
    """A colormap which is defined by n evenly spaced points in
    the HUSL hue space.

    Parameters
    ----------
    ncolors : int, optional
        The number of colors to generate.
    hue_start : int, optional
        The hue start value. Must be in the range [0, 360], the default is 0.
    saturation : float, optional
        The saturation component of the colors to generate. The default is
        fully saturated (1.0). Must be in the range [0, 1.0].
    value : float, optional
        The value component of the colors to generate or "brightness". Must
        be in the range [0, 1.0], and the default is 0.7.
    controls : array-like, optional
        The list of control points for the colors to generate. It should be
        an increasing list of floating-point number between 0.0 and 1.0.
        The first control point must be 0.0. The last control point must be
        1.0. The number of control points depends on the interpolation scheme.
    interpolation : str, optional
        The interpolation mode of the colormap. Default: 'linear'. Can also
        be 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).

    Notes
    -----
    For more information about HUSL colors see http://husl-colors.org
    """

    def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
                 controls=None, interpolation='linear'):
        # ncolors evenly spaced hues; 360 itself would duplicate hue 0.
        hues = np.linspace(0, 360, ncolors + 1)[:-1]
        hues += hue_start
        hues %= 360
        # Scale [0, 1] inputs up; husl_to_rgb appears to expect a ~[0, 99]
        # range here -- confirm against vispy.ext.husl.
        saturation *= 99
        value *= 99
        colors = ColorArray(
            [husl_to_rgb(hue, saturation, value) for hue in hues],
        )
        super(_HUSL, self).__init__(colors, controls=controls,
                                    interpolation=interpolation)
class _Diverging(Colormap):
    """Diverging colormap between two HUSL hues through a neutral center.

    Parameters
    ----------
    h_pos : scalar
        HUSL hue (degrees) of the positive end of the map.
    h_neg : scalar
        HUSL hue (degrees) of the negative end of the map.
    saturation : float
        Saturation of both endpoint colors, in [0, 1].
    value : float
        Brightness of both endpoint colors, in [0, 1].
    center : str
        'light' (default) for a near-white midpoint, 'dark' for a
        near-black one.
    """
    def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
                 center="light"):
        # Scale [0, 1] inputs up for husl_to_rgb, as in _HUSL above.
        saturation *= 99
        value *= 99
        start = husl_to_rgb(h_neg, saturation, value)
        mid = ((0.133, 0.133, 0.133) if center == "dark" else
               (0.92, 0.92, 0.92))
        end = husl_to_rgb(h_pos, saturation, value)
        colors = ColorArray([start, mid, end])
        super(_Diverging, self).__init__(colors)
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774
# Taken from original Viridis colormap data in matplotlib implementation
# Sampled 128 points from the raw data-set of 256 samples.
# Sub sampled to 128 points since 256 points causes VisPy to freeze.
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
# Registry of named colormaps: values are either ready-made instances or
# classes (instantiated by get_colormap with user-supplied arguments).
_colormaps = dict(
    # Some colormap presets
    autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
    blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
    cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
    greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
    reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
    spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
    summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
    fire=_Fire(),
    grays=_Grays(),
    hot=_Hot(),
    ice=_Ice(),
    winter=_Winter(),
    light_blues=_SingleHue(),
    orange=_SingleHue(hue=35),
    # Every other viridis sample (128 of 256); see the HACK note above.
    viridis=Colormap(ColorArray(_viridis_data[::2])),
    # Diverging presets
    coolwarm=Colormap(ColorArray(
        [
            (226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
            (30, 0.01, 0.87),
            (20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
        ],
        color_space="hsv"
    )),
    PuGr=_Diverging(145, 280, 0.85, 0.30),
    GrBu=_Diverging(255, 133, 0.75, 0.6),
    GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
    RdBu=_Diverging(220, 20, 0.75, 0.5),
    # Configurable colormaps
    cubehelix=CubeHelixColormap,
    single_hue=_SingleHue,
    hsl=_HSL,
    husl=_HUSL,
    diverging=_Diverging
)
def get_colormap(name, *args, **kwargs):
    """Obtain a colormap by name, passing existing colormaps through.

    Some colormaps can have additional configuration parameters; those are
    forwarded to the colormap class constructor. Refer to the corresponding
    class documentation for more information.

    Parameters
    ----------
    name : str | Colormap
        Colormap name. Can also be a Colormap for pass-through.
    *args, **kwargs
        Arguments forwarded to configurable colormap classes.

    Examples
    --------
    >>> get_colormap('autumn')
    >>> get_colormap('single_hue', hue=10)
    """
    if isinstance(name, BaseColormap):
        cmap = name
    elif not isinstance(name, string_types):
        raise TypeError('colormap must be a Colormap or string name')
    elif name not in _colormaps:
        raise KeyError('colormap name %s not found' % name)
    else:
        cmap = _colormaps[name]
    # Configurable entries are registered as classes; instantiate on demand.
    return cmap(*args, **kwargs) if inspect.isclass(cmap) else cmap
def get_colormaps():
    """Return a copy of the registry mapping colormap names to colormaps."""
    return _colormaps.copy()
| bsd-3-clause |
danking/hail | hail/python/hail/methods/statgen.py | 1 | 151412 | import itertools
import math
import numpy as np
from typing import Dict, Callable
import builtins
import hail
import hail as hl
import hail.expr.aggregators as agg
from hail.expr import (Expression, ExpressionException, expr_float64, expr_call,
expr_any, expr_numeric, expr_locus, analyze, check_entry_indexed,
check_row_indexed, matrix_table_source, table_source)
from hail.expr.types import tbool, tarray, tfloat64, tint32
from hail import ir
from hail.genetics.reference_genome import reference_genome_type
from hail.linalg import BlockMatrix
from hail.matrixtable import MatrixTable
from hail.methods.misc import require_biallelic, require_row_key_variant
from hail.stats import LinearMixedModel
from hail.table import Table
from hail.typecheck import (typecheck, nullable, numeric, oneof, sequenceof,
enumeration, anytype)
from hail.utils import wrap_to_list, new_temp_file, FatalError
from hail.utils.java import Env, info, warning
from . import relatedness
from . import pca
from ..backend.spark_backend import SparkBackend
pc_relate = relatedness.pc_relate
identity_by_descent = relatedness.identity_by_descent
_blanczos_pca = pca._blanczos_pca
_hwe_normalized_blanczos = pca._hwe_normalized_blanczos
hwe_normalized_pca = pca.hwe_normalized_pca
pca = pca.pca
@typecheck(call=expr_call,
           aaf_threshold=numeric,
           include_par=bool,
           female_threshold=numeric,
           male_threshold=numeric,
           aaf=nullable(str))
def impute_sex(call, aaf_threshold=0.0, include_par=False, female_threshold=0.2, male_threshold=0.8, aaf=None) -> Table:
    r"""Impute sex of samples by calculating inbreeding coefficient on the
    X chromosome.

    .. include:: ../_templates/req_tvariant.rst

    .. include:: ../_templates/req_biallelic.rst

    Examples
    --------
    Remove samples where imputed sex does not equal reported sex:

    >>> imputed_sex = hl.impute_sex(dataset.GT)
    >>> dataset_result = dataset.filter_cols(imputed_sex[dataset.s].is_female != dataset.pheno.is_female,
    ...                                      keep=False)

    Notes
    -----
    We have used the same implementation as `PLINK v1.7
    <https://zzz.bwh.harvard.edu/plink/summary.shtml#sexcheck>`__.

    Let `gr` be the reference genome of the type of the `locus` key (as
    given by :attr:`.tlocus.reference_genome`)

    1. Filter the dataset to loci on the X contig defined by `gr`.
    2. Calculate alternate allele frequency (AAF) for each row from the dataset.
    3. Filter to variants with AAF above `aaf_threshold`.
    4. Remove loci in the pseudoautosomal region, as defined by `gr`, unless
       `include_par` is ``True`` (it defaults to ``False``)
    5. For each row and column with a non-missing genotype call, :math:`E`, the
       expected number of homozygotes (from population AAF), is computed as
       :math:`1.0 - (2.0*\mathrm{maf}*(1.0-\mathrm{maf}))`.
    6. For each row and column with a non-missing genotype call, :math:`O`, the
       observed number of homozygotes, is computed interpreting ``0`` as
       heterozygote and ``1`` as homozygote`
    7. For each row and column with a non-missing genotype call, :math:`N` is
       incremented by 1
    8. For each column, :math:`E`, :math:`O`, and :math:`N` are combined across
       variants
    9. For each column, :math:`F` is calculated by :math:`(O - E) / (N - E)`
    10. A sex is assigned to each sample with the following criteria:
        - Female when ``F < 0.2``
        - Male when ``F > 0.8``
        Use `female_threshold` and `male_threshold` to change this behavior.

    **Annotations**

    The returned column-key indexed :class:`.Table` has the following fields in
    addition to the matrix table's column keys:

    - **is_female** (:py:data:`.tbool`) -- True if the imputed sex is female,
      false if male, missing if undetermined.
    - **f_stat** (:py:data:`.tfloat64`) -- Inbreeding coefficient.
    - **n_called** (:py:data:`.tint64`) -- Number of variants with a genotype call.
    - **expected_homs** (:py:data:`.tfloat64`) -- Expected number of homozygotes.
    - **observed_homs** (:py:data:`.tint64`) -- Observed number of homozygotes.

    Parameters
    ----------
    call : :class:`.CallExpression`
        A genotype call for each row and column. The source dataset's row keys
        must be [[locus], alleles] with types :class:`.tlocus` and
        :class:`.tarray` of :obj:`.tstr`. Moreover, the alleles array must have
        exactly two elements (i.e. the variant must be biallelic).
    aaf_threshold : :obj:`float`
        Minimum alternate allele frequency threshold.
    include_par : :obj:`bool`
        Include pseudoautosomal regions.
    female_threshold : :obj:`float`
        Samples are called females if F < female_threshold.
    male_threshold : :obj:`float`
        Samples are called males if F > male_threshold.
    aaf : :class:`str` or :obj:`None`
        A field defining the alternate allele frequency for each row. If
        ``None``, AAF will be computed from `call`.

    Returns
    -------
    :class:`.Table`
        Sex imputation statistics per sample.
    """
    if aaf_threshold < 0.0 or aaf_threshold > 1.0:
        raise FatalError("Invalid argument for `aaf_threshold`. Must be in range [0, 1].")
    mt = call._indices.source
    mt, _ = mt._process_joins(call)
    mt = mt.annotate_entries(call=call)
    mt = require_biallelic(mt, 'impute_sex')
    # Compute AAF from the call itself when no precomputed field is supplied.
    if (aaf is None):
        mt = mt.annotate_rows(aaf=agg.call_stats(mt.call, mt.alleles).AF[1])
        aaf = 'aaf'
    rg = mt.locus.dtype.reference_genome
    # Restrict to the X contig(s) of the reference genome.
    mt = hl.filter_intervals(mt,
                             hl.map(lambda x_contig: hl.parse_locus_interval(x_contig, rg), rg.x_contigs),
                             keep=True)
    # Drop pseudoautosomal regions unless the caller opted in.
    if not include_par:
        interval_type = hl.tarray(hl.tinterval(hl.tlocus(rg)))
        mt = hl.filter_intervals(mt,
                                 hl.literal(rg.par, interval_type),
                                 keep=False)
    # Keep variants whose AAF is strictly inside (aaf_threshold, 1 - aaf_threshold).
    mt = mt.filter_rows((mt[aaf] > aaf_threshold) & (mt[aaf] < (1 - aaf_threshold)))
    mt = mt.annotate_cols(ib=agg.inbreeding(mt.call, mt[aaf]))
    # Assign sex from the inbreeding coefficient; missing when between thresholds.
    kt = mt.select_cols(
        is_female=hl.if_else(mt.ib.f_stat < female_threshold,
                             True,
                             hl.if_else(mt.ib.f_stat > male_threshold,
                                        False,
                                        hl.missing(tbool))),
        **mt.ib).cols()
    return kt
def _get_regression_row_fields(mt, pass_through, method) -> Dict[str, str]:
    """Resolve `pass_through` entries into a name -> expression mapping.

    Each entry may be a row-field name or a row-indexed field expression.
    Row-key fields may be named redundantly (they pass through silently);
    any other duplicate raises. Key fields themselves are excluded from the
    returned mapping.
    """
    # Seed with key-name -> key-name so duplicates against keys are detectable.
    resolved = {key: key for key in mt.row_key.keys()}
    for item in pass_through:
        if isinstance(item, str):
            if item not in mt.row:
                raise ValueError(f"'{method}/pass_through': MatrixTable has no row field {repr(item)}")
            # Re-naming a key field is allowed; any other collision is an error.
            if item in resolved and item not in mt.row_key:
                raise ValueError(f"'{method}/pass_through': found duplicated field {repr(item)}")
            resolved[item] = mt[item]
        else:
            assert isinstance(item, Expression)
            if not item._ir.is_nested_field:
                raise ValueError(f"'{method}/pass_through': expect fields or nested fields, not complex expressions")
            if not item._indices == mt._row_indices:
                raise ExpressionException(f"'{method}/pass_through': require row-indexed fields, found indices {item._indices.axes}")
            field_name = item._ir.name
            # Only an expression that is literally the key field may collide.
            if field_name in resolved and not (field_name in mt.row_key and item._ir == mt[field_name]._ir):
                raise ValueError(f"'{method}/pass_through': found duplicated field {repr(field_name)}")
            resolved[field_name] = item
    # Key fields are carried implicitly; strip them from the pass-through set.
    return {name: expr for name, expr in resolved.items() if name not in mt.row_key}
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           block_size=int,
           pass_through=sequenceof(oneof(str, Expression)))
def linear_regression_rows(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
    r"""For each row, test an input variable for association with
    response variables using linear regression.

    Examples
    --------
    >>> result_ht = hl.linear_regression_rows(
    ...     y=dataset.pheno.height,
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])

    Warning
    -------
    As in the example, the intercept covariate ``1`` must be
    included **explicitly** if desired.

    Warning
    -------
    If `y` is a single value or a list, :func:`.linear_regression_rows`
    considers the same set of columns (i.e., samples, points) for every response
    variable and row, namely those columns for which **all** response variables
    and covariates are defined.

    If `y` is a list of lists, then each inner list is treated as an
    independent group, subsetting columns for missingness separately.

    Notes
    -----
    With the default root and `y` a single expression, the following row-indexed
    fields are added.

    - **<row key fields>** (Any) -- Row key fields.
    - **<pass_through fields>** (Any) -- Row fields in `pass_through`.
    - **n** (:py:data:`.tint32`) -- Number of columns used.
    - **sum_x** (:py:data:`.tfloat64`) -- Sum of input values `x`.
    - **y_transpose_x** (:py:data:`.tfloat64`) -- Dot product of response
      vector `y` with the input vector `x`.
    - **beta** (:py:data:`.tfloat64`) --
      Fit effect coefficient of `x`, :math:`\hat\beta_1` below.
    - **standard_error** (:py:data:`.tfloat64`) --
      Estimated standard error, :math:`\widehat{\mathrm{se}}_1`.
    - **t_stat** (:py:data:`.tfloat64`) -- :math:`t`-statistic, equal to
      :math:`\hat\beta_1 / \widehat{\mathrm{se}}_1`.
    - **p_value** (:py:data:`.tfloat64`) -- :math:`p`-value.

    If `y` is a list of expressions, then the last five fields instead have type
    :class:`.tarray` of :py:data:`.tfloat64`, with corresponding indexing of
    the list and each array.

    If `y` is a list of lists of expressions, then `n` and `sum_x` are of type
    ``array<float64>``, and the last five fields are of type
    ``array<array<float64>>``. Index into these arrays with
    ``a[index_in_outer_list, index_in_inner_list]``. For example, if
    ``y=[[a], [b, c]]`` then the p-value for ``b`` is ``p_value[1][0]``.

    In the statistical genetics example above, the input variable `x` encodes
    genotype as the number of alternate alleles (0, 1, or 2). For each variant
    (row), genotype is tested for association with height controlling for age
    and sex, by fitting the linear regression model:

    .. math::

        \mathrm{height} = \beta_0 + \beta_1 \, \mathrm{genotype}
            + \beta_2 \, \mathrm{age}
            + \beta_3 \, \mathrm{is\_female}
            + \varepsilon,
        \quad
        \varepsilon \sim \mathrm{N}(0, \sigma^2)

    Boolean covariates like :math:`\mathrm{is\_female}` are encoded as 1 for
    ``True`` and 0 for ``False``. The null model sets :math:`\beta_1 = 0`.

    The standard least-squares linear regression model is derived in Section
    3.2 of `The Elements of Statistical Learning, 2nd Edition
    <http://statweb.stanford.edu/~tibs/ElemStatLearn/printings/ESLII_print10.pdf>`__.
    See equation 3.12 for the t-statistic which follows the t-distribution with
    :math:`n - k - 1` degrees of freedom, under the null hypothesis of no
    effect, with :math:`n` samples and :math:`k` covariates in addition to
    ``x``.

    Note
    ----
    Use the `pass_through` parameter to include additional row fields from
    matrix table underlying ``x``. For example, to include an "rsid" field, set
    ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.

    Parameters
    ----------
    y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
        One or more column-indexed response expressions.
    x : :class:`.Float64Expression`
        Entry-indexed expression for input variable.
    covariates : :obj:`list` of :class:`.Float64Expression`
        List of column-indexed covariate expressions.
    block_size : :obj:`int`
        Number of row regressions to perform simultaneously per core. Larger blocks
        require more memory but may improve performance.
    pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
        Additional row fields to include in the resulting table.

    Returns
    -------
    :class:`.Table`
    """
    # The Spark-specific implementation below is a compiled MatrixToTableApply;
    # all other backends use the ndarray-based Python implementation.
    if not isinstance(Env.backend(), SparkBackend):
        return _linear_regression_rows_nd(y, x, covariates, block_size, pass_through)
    mt = matrix_table_source('linear_regression_rows/x', x)
    check_entry_indexed('linear_regression_rows/x', x)
    y_is_list = isinstance(y, list)
    if y_is_list and len(y) == 0:
        raise ValueError("'linear_regression_rows': found no values for 'y'")
    # A list-of-lists 'y' selects the "chained" kernel with per-group missingness.
    is_chained = y_is_list and isinstance(y[0], list)
    if is_chained and any(len(lst) == 0 for lst in y):
        raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
    y = wrap_to_list(y)
    for e in (itertools.chain.from_iterable(y) if is_chained else y):
        analyze('linear_regression_rows/y', e, mt._col_indices)
    for e in covariates:
        analyze('linear_regression_rows/covariates', e, mt._col_indices)
    _warn_if_no_intercept('linear_regression_rows', covariates)
    x_field_name = Env.get_uid()
    # Bind y/covariate expressions to fresh hidden column fields so the
    # compiled kernel can reference them by name.
    if is_chained:
        y_field_names = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
        y_dict = dict(zip(itertools.chain.from_iterable(y_field_names), itertools.chain.from_iterable(y)))
        func = 'LinearRegressionRowsChained'
    else:
        y_field_names = list(f'__y_{i}' for i in range(len(y)))
        y_dict = dict(zip(y_field_names, y))
        func = 'LinearRegressionRowsSingle'
    cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
    row_fields = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows')
    # FIXME: selecting an existing entry field should be emitted as a SelectFields
    mt = mt._select_all(col_exprs=dict(**y_dict,
                                       **dict(zip(cov_field_names, covariates))),
                        row_exprs=row_fields,
                        col_key=[],
                        entry_exprs={x_field_name: x})
    config = {
        'name': func,
        'yFields': y_field_names,
        'xField': x_field_name,
        'covFields': cov_field_names,
        'rowBlockSize': block_size,
        'passThrough': [x for x in row_fields if x not in mt.row_key]
    }
    ht_result = Table(ir.MatrixToTableApply(mt._mir, config))
    # For a scalar 'y', unwrap the length-1 arrays produced by the kernel.
    if not y_is_list:
        fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
        ht_result = ht_result.annotate(**{f: ht_result[f][0] for f in fields})
    return ht_result.persist()
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           block_size=int,
           pass_through=sequenceof(oneof(str, Expression)))
def _linear_regression_rows_nd(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
    """Backend-agnostic implementation of :func:`.linear_regression_rows`
    built on Hail ndarray expressions.

    Mirrors the argument validation and output schema of
    `linear_regression_rows`; used for non-Spark backends.
    """
    mt = matrix_table_source('linear_regression_rows_nd/x', x)
    check_entry_indexed('linear_regression_rows_nd/x', x)
    y_is_list = isinstance(y, list)
    if y_is_list and len(y) == 0:
        raise ValueError("'linear_regression_rows_nd': found no values for 'y'")
    # A list-of-lists 'y' means chained groups with independent missingness.
    is_chained = y_is_list and isinstance(y[0], list)
    if is_chained and any(len(lst) == 0 for lst in y):
        raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
    y = wrap_to_list(y)
    for e in (itertools.chain.from_iterable(y) if is_chained else y):
        analyze('linear_regression_rows_nd/y', e, mt._col_indices)
    for e in covariates:
        analyze('linear_regression_rows_nd/covariates', e, mt._col_indices)
    _warn_if_no_intercept('linear_regression_rows_nd', covariates)
    x_field_name = Env.get_uid()
    if is_chained:
        y_field_name_groups = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
        y_dict = dict(zip(itertools.chain.from_iterable(y_field_name_groups), itertools.chain.from_iterable(y)))
    else:
        y_field_name_groups = list(f'__y_{i}' for i in range(len(y)))
        y_dict = dict(zip(y_field_name_groups, y))
        # Wrapping in a list since the code is written for the more general chained case.
        y_field_name_groups = [y_field_name_groups]
    cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
    row_field_names = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows_nd')
    # FIXME: selecting an existing entry field should be emitted as a SelectFields
    mt = mt._select_all(col_exprs=dict(**y_dict,
                                       **dict(zip(cov_field_names, covariates))),
                        row_exprs=row_field_names,
                        col_key=[],
                        entry_exprs={x_field_name: x})
    entries_field_name = 'ent'
    sample_field_name = "by_sample"
    num_y_lists = len(y_field_name_groups)

    # Given a hail array, get the mean of the nonmissing entries and
    # return new array where the missing entries are the mean.
    def mean_impute(hl_array):
        non_missing_mean = hl.mean(hl_array, filter_missing=True)
        return hl_array.map(lambda entry: hl.if_else(hl.is_defined(entry), entry, non_missing_mean))

    def select_array_indices(hl_array, indices):
        # Gather hl_array[i] for each i in indices.
        return indices.map(lambda i: hl_array[i])

    def dot_rows_with_themselves(matrix):
        # Row-wise squared norms: diag(M @ M.T) without the full product.
        return (matrix * matrix).sum(1)

    def no_missing(hail_array):
        return hail_array.all(lambda element: hl.is_defined(element))

    ht_local = mt._localize_entries(entries_field_name, sample_field_name)
    ht = ht_local.transmute(**{entries_field_name: ht_local[entries_field_name][x_field_name]})

    def setup_globals(ht):
        # Precompute, as table globals, everything that does not depend on a
        # row: kept-sample indices, QR of the covariates, and Q^T y products.
        # cov_arrays is per sample, then per cov.
        if covariates:
            ht = ht.annotate_globals(cov_arrays=ht[sample_field_name].map(lambda sample_struct: [sample_struct[cov_name] for cov_name in cov_field_names]))
        else:
            ht = ht.annotate_globals(cov_arrays=ht[sample_field_name].map(lambda sample_struct: hl.empty_array(hl.tfloat64)))
        ht = ht.annotate_globals(
            y_arrays_per_group=[ht[sample_field_name].map(lambda sample_struct: [sample_struct[y_name] for y_name in one_y_field_name_set]) for one_y_field_name_set in y_field_name_groups]
        )
        all_covs_defined = ht.cov_arrays.map(lambda sample_covs: no_missing(sample_covs))

        def get_kept_samples(sample_ys):
            # sample_ys is an array of samples, with each element being an array of the y_values
            return hl.enumerate(sample_ys).filter(
                lambda idx_and_y_values: all_covs_defined[idx_and_y_values[0]] & no_missing(idx_and_y_values[1])
            ).map(lambda idx_and_y_values: idx_and_y_values[0])

        kept_samples = ht.y_arrays_per_group.map(get_kept_samples)
        y_nds = hl.zip(kept_samples, ht.y_arrays_per_group).map(lambda sample_indices_and_y_arrays:
                                                                hl.nd.array(sample_indices_and_y_arrays[0].map(lambda idx:
                                                                                                               sample_indices_and_y_arrays[1][idx])))
        cov_nds = kept_samples.map(lambda group: hl.nd.array(group.map(lambda idx: ht.cov_arrays[idx])))
        k = builtins.len(covariates)
        ns = kept_samples.map(lambda one_sample_set: hl.len(one_sample_set))
        # With no covariates, use an empty (0 x n) Q^T so downstream algebra still works.
        cov_Qts = hl.if_else(k > 0,
                             cov_nds.map(lambda one_cov_nd: hl.nd.qr(one_cov_nd)[0].T),
                             ns.map(lambda n: hl.nd.zeros((0, n))))
        Qtys = hl.zip(cov_Qts, y_nds).map(lambda cov_qt_and_y: cov_qt_and_y[0] @ cov_qt_and_y[1])
        return ht.annotate_globals(
            kept_samples=kept_samples,
            __y_nds=y_nds,
            ns=ns,
            ds=ns.map(lambda n: n - k - 1),
            __cov_Qts=cov_Qts,
            __Qtys=Qtys,
            __yyps=hl.range(num_y_lists).map(lambda i: dot_rows_with_themselves(y_nds[i].T) - dot_rows_with_themselves(Qtys[i].T)))

    ht = setup_globals(ht)

    def process_block(block):
        # Regress all rows in one block at once via ndarray algebra.
        rows_in_block = hl.len(block)

        # Processes one block group based on given idx. Returns a single struct.
        def process_y_group(idx):
            X = hl.nd.array(block[entries_field_name].map(lambda row: mean_impute(select_array_indices(row, ht.kept_samples[idx])))).T
            n = ht.ns[idx]
            sum_x = X.sum(0)
            Qtx = ht.__cov_Qts[idx] @ X
            ytx = ht.__y_nds[idx].T @ X
            xyp = ytx - (ht.__Qtys[idx].T @ Qtx)
            xxpRec = (dot_rows_with_themselves(X.T) - dot_rows_with_themselves(Qtx.T)).map(lambda entry: 1 / entry)
            b = xyp * xxpRec
            se = ((1.0 / ht.ds[idx]) * (ht.__yyps[idx].reshape((-1, 1)) @ xxpRec.reshape((1, -1)) - (b * b))).map(lambda entry: hl.sqrt(entry))
            t = b / se
            return hl.rbind(t, lambda t:
                            hl.rbind(ht.ds[idx], lambda d:
                                     hl.rbind(t.map(lambda entry: 2 * hl.expr.functions.pT(-hl.abs(entry), d, True, False)), lambda p:
                                              hl.struct(n=hl.range(rows_in_block).map(lambda i: n), sum_x=sum_x._data_array(),
                                                        y_transpose_x=ytx.T._data_array(), beta=b.T._data_array(),
                                                        standard_error=se.T._data_array(), t_stat=t.T._data_array(),
                                                        p_value=p.T._data_array()))))

        per_y_list = hl.range(num_y_lists).map(lambda i: process_y_group(i))
        key_field_names = [key_field for key_field in ht.key]

        def build_row(row_idx):
            # For every field we care about, map across all y's, getting the row_idxth one from each.
            idxth_keys = {field_name: block[field_name][row_idx] for field_name in key_field_names}
            computed_row_field_names = ['n', 'sum_x', 'y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
            computed_row_fields = {
                field_name: per_y_list.map(lambda one_y: one_y[field_name][row_idx]) for field_name in computed_row_field_names
            }
            pass_through_rows = {
                field_name: block[field_name][row_idx] for field_name in row_field_names
            }
            # For a non-chained 'y', unwrap the singleton outer arrays.
            if not is_chained:
                computed_row_fields = {key: value[0] for key, value in computed_row_fields.items()}
            return hl.struct(**{**idxth_keys, **computed_row_fields, **pass_through_rows})

        new_rows = hl.range(rows_in_block).map(build_row)
        return new_rows

    def process_partition(part):
        grouped = part.grouped(block_size)
        return grouped.flatmap(lambda block: process_block(block))

    res = ht._map_partitions(process_partition)
    if not y_is_list:
        fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
        res = res.annotate(**{f: res[f][0] for f in fields})
    res = res.select_globals()
    return res
@typecheck(test=enumeration('wald', 'lrt', 'score', 'firth'),
           y=oneof(expr_float64, sequenceof(expr_float64)),
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           pass_through=sequenceof(oneof(str, Expression)))
def logistic_regression_rows(test, y, x, covariates, pass_through=()) -> hail.Table:
    r"""For each row, test an input variable for association with a
    binary response variable using logistic regression.

    Examples
    --------
    Run the logistic regression Wald test per variant using a Boolean
    phenotype, intercept and two covariates stored in column-indexed
    fields:

    >>> result_ht = hl.logistic_regression_rows(
    ...     test='wald',
    ...     y=dataset.pheno.is_case,
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])

    Run the logistic regression Wald test per variant using a list of binary (0/1)
    phenotypes, intercept and two covariates stored in column-indexed
    fields:

    >>> result_ht = hl.logistic_regression_rows(
    ...     test='wald',
    ...     y=[dataset.pheno.is_case, dataset.pheno.is_case],  # where pheno values are 0, 1, or missing
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])

    Warning
    -------
    :func:`.logistic_regression_rows` considers the same set of
    columns (i.e., samples, points) for every row, namely those columns for
    which **all** response variables and covariates are defined. For each row, missing values of
    `x` are mean-imputed over these columns. As in the example, the
    intercept covariate ``1`` must be included **explicitly** if desired.

    Notes
    -----
    This method performs, for each row, a significance test of the input
    variable in predicting a binary (case-control) response variable based
    on the logistic regression model. The response variable type must either
    be numeric (with all present values 0 or 1) or Boolean, in which case
    true and false are coded as 1 and 0, respectively.

    Hail supports the Wald test ('wald'), likelihood ratio test ('lrt'),
    Rao score test ('score'), and Firth test ('firth'). Hail only includes
    columns for which the response variable and all covariates are defined.
    For each row, Hail imputes missing input values as the mean of the
    non-missing values.

    The example above considers a model of the form

    .. math::

        \mathrm{Prob}(\mathrm{is\_case}) =
            \mathrm{sigmoid}(\beta_0 + \beta_1 \, \mathrm{gt}
                            + \beta_2 \, \mathrm{age}
                            + \beta_3 \, \mathrm{is\_female} + \varepsilon),
        \quad
        \varepsilon \sim \mathrm{N}(0, \sigma^2)

    where :math:`\mathrm{sigmoid}` is the `sigmoid function`_, the genotype
    :math:`\mathrm{gt}` is coded as 0 for HomRef, 1 for Het, and 2 for
    HomVar, and the Boolean covariate :math:`\mathrm{is\_female}` is coded as
    1 for ``True`` (female) and 0 for ``False`` (male). The null model sets
    :math:`\beta_1 = 0`.

    .. _sigmoid function: https://en.wikipedia.org/wiki/Sigmoid_function

    The structure of the emitted row field depends on the test statistic as
    shown in the tables below.

    ========== ================== ======= ============================================
    Test       Field              Type    Value
    ========== ================== ======= ============================================
    Wald       `beta`             float64 fit effect coefficient,
                                          :math:`\hat\beta_1`
    Wald       `standard_error`   float64 estimated standard error,
                                          :math:`\widehat{\mathrm{se}}`
    Wald       `z_stat`           float64 Wald :math:`z`-statistic, equal to
                                          :math:`\hat\beta_1 / \widehat{\mathrm{se}}`
    Wald       `p_value`          float64 Wald p-value testing :math:`\beta_1 = 0`
    LRT, Firth `beta`             float64 fit effect coefficient,
                                          :math:`\hat\beta_1`
    LRT, Firth `chi_sq_stat`      float64 deviance statistic
    LRT, Firth `p_value`          float64 LRT / Firth p-value testing
                                          :math:`\beta_1 = 0`
    Score      `chi_sq_stat`      float64 score statistic
    Score      `p_value`          float64 score p-value testing :math:`\beta_1 = 0`
    ========== ================== ======= ============================================

    For the Wald and likelihood ratio tests, Hail fits the logistic model for
    each row using Newton iteration and only emits the above fields
    when the maximum likelihood estimate of the coefficients converges. The
    Firth test uses a modified form of Newton iteration. To help diagnose
    convergence issues, Hail also emits three fields which summarize the
    iterative fitting process:

    ================ =================== ======= ===============================
    Test             Field               Type    Value
    ================ =================== ======= ===============================
    Wald, LRT, Firth `fit.n_iterations`  int32   number of iterations until
                                                 convergence, explosion, or
                                                 reaching the max (25 for
                                                 Wald, LRT; 100 for Firth)
    Wald, LRT, Firth `fit.converged`     bool    ``True`` if iteration converged
    Wald, LRT, Firth `fit.exploded`      bool    ``True`` if iteration exploded
    ================ =================== ======= ===============================

    We consider iteration to have converged when every coordinate of
    :math:`\beta` changes by less than :math:`10^{-6}`. For Wald and LRT,
    up to 25 iterations are attempted; in testing we find 4 or 5 iterations
    nearly always suffice. Convergence may also fail due to explosion,
    which refers to low-level numerical linear algebra exceptions caused by
    manipulating ill-conditioned matrices. Explosion may result from (nearly)
    linearly dependent covariates or complete separation_.

    .. _separation: https://en.wikipedia.org/wiki/Separation_(statistics)

    A more common situation in genetics is quasi-complete separation, e.g.
    variants that are observed only in cases (or controls). Such variants
    inevitably arise when testing millions of variants with very low minor
    allele count. The maximum likelihood estimate of :math:`\beta` under
    logistic regression is then undefined but convergence may still occur
    after a large number of iterations due to a very flat likelihood
    surface. In testing, we find that such variants produce a secondary bump
    from 10 to 15 iterations in the histogram of number of iterations per
    variant. We also find that this faux convergence produces large standard
    errors and large (insignificant) p-values. To not miss such variants,
    consider using Firth logistic regression, linear regression, or
    group-based tests.

    Here's a concrete illustration of quasi-complete separation in R. Suppose
    we have 2010 samples distributed as follows for a particular variant:

    ======= ====== === ======
    Status  HomRef Het HomVar
    ======= ====== === ======
    Case    1000   10  0
    Control 1000   0   0
    ======= ====== === ======

    The following R code fits the (standard) logistic, Firth logistic,
    and linear regression models to this data, where ``x`` is genotype,
    ``y`` is phenotype, and ``logistf`` is from the logistf package:

    .. code-block:: R

        x <- c(rep(0,1000), rep(1,1000), rep(1,10))
        y <- c(rep(0,1000), rep(0,1000), rep(1,10))
        logfit <- glm(y ~ x, family=binomial())
        firthfit <- logistf(y ~ x)
        linfit <- lm(y ~ x)

    The resulting p-values for the genotype coefficient are 0.991, 0.00085,
    and 0.0016, respectively. The erroneous value 0.991 is due to
    quasi-complete separation. Moving one of the 10 hets from case to control
    eliminates this quasi-complete separation; the p-values from R are then
    0.0373, 0.0111, and 0.0116, respectively, as expected for a less
    significant association.

    The Firth test reduces bias from small counts and resolves the issue of
    separation by penalizing maximum likelihood estimation by the `Jeffrey's
    invariant prior <https://en.wikipedia.org/wiki/Jeffreys_prior>`__. This
    test is slower, as both the null and full model must be fit per variant,
    and convergence of the modified Newton method is linear rather than
    quadratic. For Firth, 100 iterations are attempted for the null model
    and, if that is successful, for the full model as well. In testing we
    find 20 iterations nearly always suffices. If the null model fails to
    converge, then the `logreg.fit` fields reflect the null model;
    otherwise, they reflect the full model.

    See
    `Recommended joint and meta-analysis strategies for case-control association testing of single low-count variants <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4049324/>`__
    for an empirical comparison of the logistic Wald, LRT, score, and Firth
    tests. The theoretical foundations of the Wald, likelihood ratio, and score
    tests may be found in Chapter 3 of Gesine Reinert's notes
    `Statistical Theory <http://www.stats.ox.ac.uk/~reinert/stattheory/theoryshort09.pdf>`__.
    Firth introduced his approach in
    `Bias reduction of maximum likelihood estimates, 1993 <http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/GibbsFieldEst/BiasReductionMLE.pdf>`__.
    Heinze and Schemper further analyze Firth's approach in
    `A solution to the problem of separation in logistic regression, 2002 <https://cemsiis.meduniwien.ac.at/fileadmin/msi_akim/CeMSIIS/KB/volltexte/Heinze_Schemper_2002_Statistics_in_Medicine.pdf>`__.

    Hail's logistic regression tests correspond to the ``b.wald``,
    ``b.lrt``, and ``b.score`` tests in `EPACTS`_. For each variant, Hail
    imputes missing input values as the mean of non-missing input values,
    whereas EPACTS subsets to those samples with called genotypes. Hence,
    Hail and EPACTS results will currently only agree for variants with no
    missing genotypes.

    .. _EPACTS: http://genome.sph.umich.edu/wiki/EPACTS#Single_Variant_Tests

    Note
    ----
    Use the `pass_through` parameter to include additional row fields from
    matrix table underlying ``x``. For example, to include an "rsid" field, set
    ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.

    Parameters
    ----------
    test : {'wald', 'lrt', 'score', 'firth'}
        Statistical test.
    y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
        One or more column-indexed response expressions.
        All non-missing values must evaluate to 0 or 1.
        Note that a :class:`.BooleanExpression` will be implicitly converted to
        a :class:`.Float64Expression` with this property.
    x : :class:`.Float64Expression`
        Entry-indexed expression for input variable.
    covariates : :obj:`list` of :class:`.Float64Expression`
        Non-empty list of column-indexed covariate expressions.
    pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
        Additional row fields to include in the resulting table.

    Returns
    -------
    :class:`.Table`
    """
    if len(covariates) == 0:
        raise ValueError('logistic regression requires at least one covariate expression')
    # BUGFIX: error labels previously read 'logistic_regresion_rows' (typo).
    mt = matrix_table_source('logistic_regression_rows/x', x)
    check_entry_indexed('logistic_regression_rows/x', x)
    y_is_list = isinstance(y, list)
    if y_is_list and len(y) == 0:
        raise ValueError("'logistic_regression_rows': found no values for 'y'")
    y = wrap_to_list(y)
    for e in covariates:
        analyze('logistic_regression_rows/covariates', e, mt._col_indices)
    _warn_if_no_intercept('logistic_regression_rows', covariates)
    x_field_name = Env.get_uid()
    # Bind y/covariate expressions to fresh hidden column fields for the kernel.
    y_field = [f'__y_{i}' for i in range(len(y))]
    y_dict = dict(zip(y_field, y))
    cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
    row_fields = _get_regression_row_fields(mt, pass_through, 'logistic_regression_rows')
    # FIXME: selecting an existing entry field should be emitted as a SelectFields
    mt = mt._select_all(col_exprs=dict(**y_dict,
                                       **dict(zip(cov_field_names, covariates))),
                        row_exprs=row_fields,
                        col_key=[],
                        entry_exprs={x_field_name: x})
    config = {
        'name': 'LogisticRegression',
        'test': test,
        'yFields': y_field,
        'xField': x_field_name,
        'covFields': cov_field_names,
        'passThrough': [x for x in row_fields if x not in mt.row_key]
    }
    result = Table(ir.MatrixToTableApply(mt._mir, config))
    # For a scalar 'y', flatten the length-1 result array into top-level fields.
    if not y_is_list:
        result = result.transmute(**result.logistic_regression[0])
    return result.persist()
# Helpers for logreg:
def mean_impute(hl_array):
    """Fill missing entries of `hl_array` with the mean of its defined entries.

    The mean is computed once over the non-missing values; each missing
    element is then replaced by that value.
    """
    fill_value = hl.mean(hl_array, filter_missing=True)
    return hl_array.map(lambda element: hl.if_else(hl.is_defined(element), element, fill_value))
def sigmoid(hl_nd):
    """Elementwise logistic sigmoid of an ndarray expression, computed stably.

    Uses the branch that only ever exponentiates a non-positive number:
    for x > 0, ``1 / (1 + exp(-x))``; otherwise ``exp(x) / (exp(x) + 1)``.
    The original code had these branches swapped, so for large positive x
    (roughly x > 709) ``exp(x)`` overflowed to ``inf`` and the result was
    ``inf / inf = nan`` instead of 1.0.
    """
    return hl_nd.map(lambda x: hl.if_else(
        x > 0,
        1 / (1 + hl.exp(-x)),
        hl.rbind(hl.exp(x), lambda exped: exped / (exped + 1))))
def nd_max(hl_nd):
    """Return the maximum element of an ndarray expression.

    The ndarray is flattened to one dimension and reduced with :func:`hl.max`
    over its backing data array.
    """
    flattened = hl_nd.reshape(-1)
    return hl.max(flattened._data_array())
def logreg_fit(X, y, null_fit=None, max_iter=25, tol=1E-6):
    """Fit a logistic regression model by Newton iteration, in Hail ndarray expressions.

    Parameters
    ----------
    X : 2-d float64 ndarray expression
        Design matrix, samples by covariates.
    y : 1-d float64 ndarray expression
        Response vector of 0/1 values, one entry per sample.
    null_fit : struct expression, optional
        Result of a previous fit over the leading columns of `X` (the null
        model). When given, its coefficients, score, and Fisher information
        seed the full fit instead of starting from the intercept-only guess.
    max_iter : int
        Maximum number of Newton iterations attempted.
    tol : float
        Convergence threshold on the largest absolute coefficient update.

    Returns
    -------
    Struct expression with fields ``b`` (coefficients), ``score``, ``fisher``,
    ``num_iter``, ``log_lkhd``, ``converged``, and ``exploded``.
    """
    assert(X.ndim == 2)
    assert(y.ndim == 1)
    # X is samples by covs.
    # y is length num samples, for one cov.
    n = X.shape[0]
    m = X.shape[1]

    if null_fit is None:
        # No null model: start from the intercept at logit of the phenotype
        # mean, all other coefficients zero.
        avg = y.sum() / n
        logit_avg = hl.log(avg / (1 - avg))
        b = hl.nd.hstack([hl.nd.array([logit_avg]), hl.nd.zeros((hl.int32(m - 1)))])
        mu = sigmoid(X @ b)
        score = X.T @ (y - mu)
        # Reshape so we do a rowwise multiply
        fisher = X.T @ (X * (mu * (1 - mu)).reshape(-1, 1))
    else:
        # Warm-start from the null fit: reuse its coefficients/score/Fisher
        # blocks and extend with the new trailing columns of X.
        # num covs used to fit null model.
        m0 = null_fit.b.shape[0]
        m_diff = m - m0

        X0 = X[:, 0:m0]
        X1 = X[:, m0:]

        b = hl.nd.hstack([null_fit.b, hl.nd.zeros((m_diff,))])
        mu = sigmoid(X @ b)
        score = hl.nd.hstack([null_fit.score, X1.T @ (y - mu)])

        # Assemble the Fisher information as a 2x2 block matrix: the null
        # block is reused, the remaining blocks involve the new columns.
        fisher00 = null_fit.fisher
        fisher01 = X0.T @ (X1 * (mu * (1 - mu)).reshape(-1, 1))
        fisher10 = fisher01.T
        fisher11 = X1.T @ (X1 * (mu * (1 - mu)).reshape(-1, 1))

        fisher = hl.nd.vstack([
            hl.nd.hstack([fisher00, fisher01]),
            hl.nd.hstack([fisher10, fisher11])
        ])

    # Useful type abbreviations
    tvector64 = hl.tndarray(hl.tfloat64, 1)
    tmatrix64 = hl.tndarray(hl.tfloat64, 2)
    search_return_type = hl.tstruct(b=tvector64, score=tvector64, fisher=tmatrix64, num_iter=hl.tint32, log_lkhd=hl.tfloat64, converged=hl.tbool, exploded=hl.tbool)

    def na(field_name):
        # Missing value of the declared type for `field_name` (used for
        # non-converged / exploded results).
        return hl.missing(search_return_type[field_name])

    # Need to do looping now.
    def search(recur, cur_iter, b, mu, score, fisher):
        # One Newton step: solve fisher * delta_b = score. no_crash=True
        # reports failure in `failed` instead of raising.
        delta_b_struct = hl.nd.solve(fisher, score, no_crash=True)

        exploded = delta_b_struct.failed
        delta_b = delta_b_struct.solution
        max_delta_b = nd_max(delta_b.map(lambda e: hl.abs(e)))
        # Per-sample likelihood is mu when y == 1 and (1 - mu) when y == 0;
        # the log-likelihood is the sum of the logs.
        log_lkhd = ((y * mu) + (1 - y) * (1 - mu)).map(lambda e: hl.log(e)).sum()

        def compute_next_iter(cur_iter, b, mu, score, fisher):
            # Apply the Newton update and recompute the derived quantities
            # before recursing into the next iteration.
            cur_iter = cur_iter + 1
            b = b + delta_b
            mu = sigmoid(X @ b)
            score = X.T @ (y - mu)
            fisher = X.T @ (X * (mu * (1 - mu)).reshape(-1, 1))
            return recur(cur_iter, b, mu, score, fisher)

        # Termination: explosion (solve failed or nan update), iteration
        # budget exhausted, or convergence of the largest coefficient change.
        return (hl.case()
                .when(exploded | hl.is_nan(delta_b[0]), hl.struct(b=na('b'), score=na('score'), fisher=na('fisher'), num_iter=cur_iter, log_lkhd=log_lkhd, converged=False, exploded=True))
                .when(cur_iter > max_iter, hl.struct(b=na('b'), score=na('score'), fisher=na('fisher'), num_iter=cur_iter, log_lkhd=log_lkhd, converged=False, exploded=False))
                .when(max_delta_b < tol, hl.struct(b=b, score=score, fisher=fisher, num_iter=cur_iter, log_lkhd=log_lkhd, converged=True, exploded=False))
                .default(compute_next_iter(cur_iter, b, mu, score, fisher)))

    res_struct = hl.experimental.loop(search, search_return_type, 1, b, mu, score, fisher)

    return res_struct
def wald_test(X, y, null_fit, link):
    """Fit the full logistic model and return Wald statistics for the last column of X.

    Standard errors come from the diagonal of the inverse Fisher information;
    p-values are two-sided normal tail probabilities of the z-statistics.
    """
    assert (link == "logistic")
    full_fit = logreg_fit(X, y, null_fit)

    test_idx = X.shape[1] - 1
    std_errs = hl.nd.diagonal(hl.nd.inv(full_fit.fisher)).map(lambda d: hl.sqrt(d))
    z_scores = full_fit.b / std_errs
    p_values = z_scores.map(lambda zi: 2 * hl.pnorm(-hl.abs(zi)))

    diagnostics = hl.struct(
        n_iterations=full_fit.num_iter,
        converged=full_fit.converged,
        exploded=full_fit.exploded)
    return hl.struct(
        beta=full_fit.b[test_idx],
        standard_error=std_errs[test_idx],
        z_stat=z_scores[test_idx],
        p_value=p_values[test_idx],
        fit=diagnostics)
def lrt_test(X, y, null_fit, link):
    """Likelihood-ratio test of the full logistic model against `null_fit`.

    The deviance statistic is twice the log-likelihood difference; it is
    missing when the full fit did not converge. Degrees of freedom equal the
    number of columns added beyond the null model.
    """
    assert (link == "logistic")
    full_fit = logreg_fit(X, y, null_fit)

    dof = X.shape[1] - null_fit.b.shape[0]
    chi_sq = hl.if_else(
        ~full_fit.converged,
        hl.missing(hl.tfloat64),
        2 * (full_fit.log_lkhd - null_fit.log_lkhd))

    return hl.struct(
        beta=full_fit.b[X.shape[1] - 1],
        chi_sq_stat=chi_sq,
        p_value=hl.pchisqtail(chi_sq, dof),
        fit=hl.struct(
            n_iterations=full_fit.num_iter,
            converged=full_fit.converged,
            exploded=full_fit.exploded))
@typecheck(test=enumeration('wald', 'lrt', 'score', 'firth'),
           y=oneof(expr_float64, sequenceof(expr_float64)),
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           pass_through=sequenceof(oneof(str, Expression)))
def _logistic_regression_rows_nd(test, y, x, covariates, pass_through=()) -> hail.Table:
    r"""For each row, test an input variable for association with a
    binary response variable using logistic regression.

    Examples
    --------
    Run the logistic regression Wald test per variant using a Boolean
    phenotype, intercept and two covariates stored in column-indexed
    fields:

    >>> result_ht = hl.logistic_regression_rows(
    ...     test='wald',
    ...     y=dataset.pheno.is_case,
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])

    Run the logistic regression Wald test per variant using a list of binary (0/1)
    phenotypes, intercept and two covariates stored in column-indexed
    fields:

    >>> result_ht = hl.logistic_regression_rows(
    ...     test='wald',
    ...     y=[dataset.pheno.is_case, dataset.pheno.is_case],  # where pheno values are 0, 1, or missing
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])

    Warning
    -------
    :func:`.logistic_regression_rows` considers the same set of
    columns (i.e., samples, points) for every row, namely those columns for
    which **all** response variables and covariates are defined. For each row, missing values of
    `x` are mean-imputed over these columns. As in the example, the
    intercept covariate ``1`` must be included **explicitly** if desired.

    Notes
    -----
    This method performs, for each row, a significance test of the input
    variable in predicting a binary (case-control) response variable based
    on the logistic regression model. The response variable type must either
    be numeric (with all present values 0 or 1) or Boolean, in which case
    true and false are coded as 1 and 0, respectively.

    Hail supports the Wald test ('wald'), likelihood ratio test ('lrt'),
    Rao score test ('score'), and Firth test ('firth'). Hail only includes
    columns for which the response variable and all covariates are defined.
    For each row, Hail imputes missing input values as the mean of the
    non-missing values.

    The example above considers a model of the form

    .. math::

        \mathrm{Prob}(\mathrm{is\_case}) =
            \mathrm{sigmoid}(\beta_0 + \beta_1 \, \mathrm{gt}
                            + \beta_2 \, \mathrm{age}
                            + \beta_3 \, \mathrm{is\_female} + \varepsilon),
        \quad
        \varepsilon \sim \mathrm{N}(0, \sigma^2)

    where :math:`\mathrm{sigmoid}` is the `sigmoid function`_, the genotype
    :math:`\mathrm{gt}` is coded as 0 for HomRef, 1 for Het, and 2 for
    HomVar, and the Boolean covariate :math:`\mathrm{is\_female}` is coded as
    1 for ``True`` (female) and 0 for ``False`` (male). The null model sets
    :math:`\beta_1 = 0`.

    .. _sigmoid function: https://en.wikipedia.org/wiki/Sigmoid_function

    The structure of the emitted row field depends on the test statistic as
    shown in the tables below.

    ========== ================== ======= ============================================
    Test       Field              Type    Value
    ========== ================== ======= ============================================
    Wald       `beta`             float64 fit effect coefficient,
                                          :math:`\hat\beta_1`
    Wald       `standard_error`   float64 estimated standard error,
                                          :math:`\widehat{\mathrm{se}}`
    Wald       `z_stat`           float64 Wald :math:`z`-statistic, equal to
                                          :math:`\hat\beta_1 / \widehat{\mathrm{se}}`
    Wald       `p_value`          float64 Wald p-value testing :math:`\beta_1 = 0`
    LRT, Firth `beta`             float64 fit effect coefficient,
                                          :math:`\hat\beta_1`
    LRT, Firth `chi_sq_stat`      float64 deviance statistic
    LRT, Firth `p_value`          float64 LRT / Firth p-value testing
                                          :math:`\beta_1 = 0`
    Score      `chi_sq_stat`      float64 score statistic
    Score      `p_value`          float64 score p-value testing :math:`\beta_1 = 0`
    ========== ================== ======= ============================================

    For the Wald and likelihood ratio tests, Hail fits the logistic model for
    each row using Newton iteration and only emits the above fields
    when the maximum likelihood estimate of the coefficients converges. The
    Firth test uses a modified form of Newton iteration. To help diagnose
    convergence issues, Hail also emits three fields which summarize the
    iterative fitting process:

    ================ =================== ======= ===============================
    Test             Field               Type    Value
    ================ =================== ======= ===============================
    Wald, LRT, Firth `fit.n_iterations`  int32   number of iterations until
                                                 convergence, explosion, or
                                                 reaching the max (25 for
                                                 Wald, LRT; 100 for Firth)
    Wald, LRT, Firth `fit.converged`     bool    ``True`` if iteration converged
    Wald, LRT, Firth `fit.exploded`      bool    ``True`` if iteration exploded
    ================ =================== ======= ===============================

    We consider iteration to have converged when every coordinate of
    :math:`\beta` changes by less than :math:`10^{-6}`. For Wald and LRT,
    up to 25 iterations are attempted; in testing we find 4 or 5 iterations
    nearly always suffice. Convergence may also fail due to explosion,
    which refers to low-level numerical linear algebra exceptions caused by
    manipulating ill-conditioned matrices. Explosion may result from (nearly)
    linearly dependent covariates or complete separation_.

    .. _separation: https://en.wikipedia.org/wiki/Separation_(statistics)

    A more common situation in genetics is quasi-complete seperation, e.g.
    variants that are observed only in cases (or controls). Such variants
    inevitably arise when testing millions of variants with very low minor
    allele count. The maximum likelihood estimate of :math:`\beta` under
    logistic regression is then undefined but convergence may still occur
    after a large number of iterations due to a very flat likelihood
    surface. In testing, we find that such variants produce a secondary bump
    from 10 to 15 iterations in the histogram of number of iterations per
    variant. We also find that this faux convergence produces large standard
    errors and large (insignificant) p-values. To not miss such variants,
    consider using Firth logistic regression, linear regression, or
    group-based tests.

    Here's a concrete illustration of quasi-complete seperation in R. Suppose
    we have 2010 samples distributed as follows for a particular variant:

    ======= ====== === ======
    Status  HomRef Het HomVar
    ======= ====== === ======
    Case    1000   10  0
    Control 1000   0   0
    ======= ====== === ======

    The following R code fits the (standard) logistic, Firth logistic,
    and linear regression models to this data, where ``x`` is genotype,
    ``y`` is phenotype, and ``logistf`` is from the logistf package:

    .. code-block:: R

        x <- c(rep(0,1000), rep(1,1000), rep(1,10))
        y <- c(rep(0,1000), rep(0,1000), rep(1,10))
        logfit <- glm(y ~ x, family=binomial())
        firthfit <- logistf(y ~ x)
        linfit <- lm(y ~ x)

    The resulting p-values for the genotype coefficient are 0.991, 0.00085,
    and 0.0016, respectively. The erroneous value 0.991 is due to
    quasi-complete separation. Moving one of the 10 hets from case to control
    eliminates this quasi-complete separation; the p-values from R are then
    0.0373, 0.0111, and 0.0116, respectively, as expected for a less
    significant association.

    The Firth test reduces bias from small counts and resolves the issue of
    separation by penalizing maximum likelihood estimation by the `Jeffrey's
    invariant prior <https://en.wikipedia.org/wiki/Jeffreys_prior>`__. This
    test is slower, as both the null and full model must be fit per variant,
    and convergence of the modified Newton method is linear rather than
    quadratic. For Firth, 100 iterations are attempted for the null model
    and, if that is successful, for the full model as well. In testing we
    find 20 iterations nearly always suffices. If the null model fails to
    converge, then the `logreg.fit` fields reflect the null model;
    otherwise, they reflect the full model.

    See
    `Recommended joint and meta-analysis strategies for case-control association testing of single low-count variants <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4049324/>`__
    for an empirical comparison of the logistic Wald, LRT, score, and Firth
    tests. The theoretical foundations of the Wald, likelihood ratio, and score
    tests may be found in Chapter 3 of Gesine Reinert's notes
    `Statistical Theory <http://www.stats.ox.ac.uk/~reinert/stattheory/theoryshort09.pdf>`__.
    Firth introduced his approach in
    `Bias reduction of maximum likelihood estimates, 1993 <http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/GibbsFieldEst/BiasReductionMLE.pdf>`__.
    Heinze and Schemper further analyze Firth's approach in
    `A solution to the problem of separation in logistic regression, 2002 <https://cemsiis.meduniwien.ac.at/fileadmin/msi_akim/CeMSIIS/KB/volltexte/Heinze_Schemper_2002_Statistics_in_Medicine.pdf>`__.

    Hail's logistic regression tests correspond to the ``b.wald``,
    ``b.lrt``, and ``b.score`` tests in `EPACTS`_. For each variant, Hail
    imputes missing input values as the mean of non-missing input values,
    whereas EPACTS subsets to those samples with called genotypes. Hence,
    Hail and EPACTS results will currently only agree for variants with no
    missing genotypes.

    .. _EPACTS: http://genome.sph.umich.edu/wiki/EPACTS#Single_Variant_Tests

    Note
    ----
    Use the `pass_through` parameter to include additional row fields from
    matrix table underlying ``x``. For example, to include an "rsid" field, set
    ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.

    Parameters
    ----------
    test : {'wald', 'lrt', 'score', 'firth'}
        Statistical test.
    y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
        One or more column-indexed response expressions.
        All non-missing values must evaluate to 0 or 1.
        Note that a :class:`.BooleanExpression` will be implicitly converted to
        a :class:`.Float64Expression` with this property.
    x : :class:`.Float64Expression`
        Entry-indexed expression for input variable.
    covariates : :obj:`list` of :class:`.Float64Expression`
        Non-empty list of column-indexed covariate expressions.
    pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
        Additional row fields to include in the resulting table.

    Returns
    -------
    :class:`.Table`
    """
    if len(covariates) == 0:
        raise ValueError('logistic regression requires at least one covariate expression')

    # Context strings below were previously misspelled 'logistic_regresion_rows';
    # fixed for consistency with the other checks in this function.
    mt = matrix_table_source('logistic_regression_rows/x', x)
    check_entry_indexed('logistic_regression_rows/x', x)

    y_is_list = isinstance(y, list)
    if y_is_list and len(y) == 0:
        raise ValueError("'logistic_regression_rows': found no values for 'y'")
    y = wrap_to_list(y)

    for e in covariates:
        analyze('logistic_regression_rows/covariates', e, mt._col_indices)

    # _warn_if_no_intercept('logistic_regression_rows', covariates)

    # Internal field names for the regression inputs.
    x_field_name = Env.get_uid()
    y_field_names = [f'__y_{i}' for i in range(len(y))]
    num_y_fields = len(y_field_names)

    y_dict = dict(zip(y_field_names, y))

    cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
    row_fields = _get_regression_row_fields(mt, pass_through, 'logistic_regression_rows')

    # Handle filtering columns with missing values:
    mt = mt.filter_cols(hl.array(y + covariates).all(hl.is_defined))

    # FIXME: selecting an existing entry field should be emitted as a SelectFields
    mt = mt._select_all(col_exprs=dict(**y_dict,
                                       **dict(zip(cov_field_names, covariates))),
                        row_exprs=row_fields,
                        col_key=[],
                        entry_exprs={x_field_name: x})

    sample_field_name = "samples"
    ht = mt._localize_entries("entries", sample_field_name)

    # cov_nd rows are samples, columns are the different covariates
    if covariates:
        ht = ht.annotate_globals(cov_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: [sample_struct[cov_name] for cov_name in cov_field_names])))
    else:
        ht = ht.annotate_globals(cov_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: hl.empty_array(hl.tfloat64))))

    # y_nd rows are samples, columns are the various dependent variables.
    ht = ht.annotate_globals(y_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: [sample_struct[y_name] for y_name in y_field_names])))

    # Fit null models, which means doing a logreg fit with just the covariates for each phenotype.
    null_models = hl.range(num_y_fields).map(lambda idx: logreg_fit(ht.cov_nd, ht.y_nd[:, idx]))
    ht = ht.annotate_globals(nulls=null_models)
    # Mean-impute the genotype vector per row before testing.
    ht = ht.transmute(x=hl.nd.array(mean_impute(ht.entries[x_field_name])))

    if test == "wald":
        # For each y vector, need to do wald test.
        covs_and_x = hl.nd.hstack([ht.cov_nd, ht.x.reshape((-1, 1))])
        wald_structs = hl.range(num_y_fields).map(lambda idx: wald_test(covs_and_x, ht.y_nd[:, idx], ht.nulls[idx], "logistic"))
        ht = ht.annotate(logistic_regression=wald_structs)
    elif test == "lrt":
        covs_and_x = hl.nd.hstack([ht.cov_nd, ht.x.reshape((-1, 1))])
        lrt_structs = hl.range(num_y_fields).map(lambda idx: lrt_test(covs_and_x, ht.y_nd[:, idx], ht.nulls[idx], "logistic"))
        ht = ht.annotate(logistic_regression=lrt_structs)
    else:
        raise ValueError("Only support wald and lrt so far")

    # A scalar `y` argument yields a flat result rather than a length-1 array.
    if not y_is_list:
        ht = ht.transmute(**ht.logistic_regression[0])

    ht = ht.drop("x")

    return ht
@typecheck(test=enumeration('wald', 'lrt', 'score'),
           y=expr_float64,
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           pass_through=sequenceof(oneof(str, Expression)))
def poisson_regression_rows(test, y, x, covariates, pass_through=()) -> Table:
    r"""For each row, test an input variable for association with a
    count response variable using `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__.

    Notes
    -----
    See :func:`.logistic_regression_rows` for more info on statistical tests
    of general linear models.

    Note
    ----
    Use the `pass_through` parameter to include additional row fields from
    matrix table underlying ``x``. For example, to include an "rsid" field, set
    ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.

    Parameters
    ----------
    y : :class:`.Float64Expression`
        Column-indexed response expression.
        All non-missing values must evaluate to a non-negative integer.
    x : :class:`.Float64Expression`
        Entry-indexed expression for input variable.
    covariates : :obj:`list` of :class:`.Float64Expression`
        Non-empty list of column-indexed covariate expressions.
    pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
        Additional row fields to include in the resulting table.

    Returns
    -------
    :class:`.Table`
    """
    if len(covariates) == 0:
        raise ValueError('Poisson regression requires at least one covariate expression')

    mt = matrix_table_source('poisson_regression_rows/x', x)
    check_entry_indexed('poisson_regression_rows/x', x)

    # Validate that the response and every covariate are column-indexed over
    # this matrix table. (A previously-built `all_exprs` list was never read
    # and has been removed.)
    analyze('poisson_regression_rows/y', y, mt._col_indices)
    for e in covariates:
        analyze('poisson_regression_rows/covariates', e, mt._col_indices)
    _warn_if_no_intercept('poisson_regression_rows', covariates)

    # Internal field names for the regression inputs.
    x_field_name = Env.get_uid()
    y_field_name = '__y'
    cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
    row_fields = _get_regression_row_fields(mt, pass_through, 'poisson_regression_rows')

    # FIXME: selecting an existing entry field should be emitted as a SelectFields
    mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
                                       **dict(zip(cov_field_names, covariates))),
                        row_exprs=row_fields,
                        col_key=[],
                        entry_exprs={x_field_name: x})

    config = {
        'name': 'PoissonRegression',
        'test': test,
        'yField': y_field_name,
        'xField': x_field_name,
        'covFields': cov_field_names,
        'passThrough': [x for x in row_fields if x not in mt.row_key]
    }

    return Table(ir.MatrixToTableApply(mt._mir, config)).persist()
@typecheck(y=expr_float64,
           x=sequenceof(expr_float64),
           z_t=nullable(expr_float64),
           k=nullable(np.ndarray),
           p_path=nullable(str),
           overwrite=bool,
           standardize=bool,
           mean_impute=bool)
def linear_mixed_model(y,
                       x,
                       z_t=None,
                       k=None,
                       p_path=None,
                       overwrite=False,
                       standardize=True,
                       mean_impute=True):
    r"""Initialize a linear mixed model from a matrix table.

    Examples
    --------
    Initialize a model using three fixed effects (including intercept) and
    genetic marker random effects:

    >>> marker_ds = dataset.filter_rows(dataset.use_as_marker) # doctest: +SKIP
    >>> model, _ = hl.linear_mixed_model( # doctest: +SKIP
    ...     y=marker_ds.pheno.height,
    ...     x=[1, marker_ds.pheno.age, marker_ds.pheno.is_female],
    ...     z_t=marker_ds.GT.n_alt_alleles(),
    ...     p_path='output/p.bm')

    Fit the model and examine :math:`h^2`:

    >>> model.fit() # doctest: +SKIP
    >>> model.h_sq # doctest: +SKIP

    Sanity-check the normalized likelihood of :math:`h^2` over the percentile
    grid:

    >>> import matplotlib.pyplot as plt # doctest: +SKIP
    >>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP

    For this value of :math:`h^2`, test each variant for association:

    >>> result_table = hl.linear_mixed_regression_rows(dataset.GT.n_alt_alleles(), model) # doctest: +SKIP

    Alternatively, one can define a full-rank model using a pre-computed kinship
    matrix :math:`K` in ndarray form. When :math:`K` is the realized
    relationship matrix defined by the genetic markers, we obtain the same model
    as above with :math:`P` written as a block matrix but returned as an
    ndarray:

    >>> rrm = hl.realized_relationship_matrix(marker_ds.GT).to_numpy() # doctest: +SKIP
    >>> model, p = hl.linear_mixed_model( # doctest: +SKIP
    ...     y=dataset.pheno.height,
    ...     x=[1, dataset.pheno.age, dataset.pheno.is_female],
    ...     k=rrm,
    ...     p_path='output/p.bm',
    ...     overwrite=True)

    Notes
    -----
    See :class:`.LinearMixedModel` for details on the model and notation.

    Exactly one of `z_t` and `k` must be set.

    If `z_t` is set, the model is low-rank if the number of samples :math:`n` exceeds
    the number of random effects :math:`m`. At least one dimension must be less
    than or equal to 46300. If `standardize` is true, each random effect is first
    standardized to have mean 0 and variance :math:`\frac{1}{m}`, so that the
    diagonal values of the kinship matrix :math:`K = ZZ^T` are 1.0 in
    expectation. This kinship matrix corresponds to the
    :meth:`realized_relationship_matrix` in genetics. See
    :meth:`.LinearMixedModel.from_random_effects` and :meth:`.BlockMatrix.svd`
    for more details.

    If `k` is set, the model is full-rank. For correct results, the indices of
    `k` **must be aligned** with columns of the source of `y`.
    Set `p_path` if you plan to use the model in :func:`.linear_mixed_regression_rows`.
    `k` must be positive semi-definite; symmetry is not checked as only the
    lower triangle is used. See :meth:`.LinearMixedModel.from_kinship` for more
    details.

    Missing, nan, or infinite values in `y` or `x` will raise an error.
    If set, `z_t` may only have missing values if `mean_impute` is true, in
    which case missing values of are set to the row mean. We recommend setting
    `mean_impute` to false if you expect no missing values, both for performance
    and as a sanity check.

    Warning
    -------
    If the rows of the matrix table have been filtered to a small fraction,
    then :meth:`.MatrixTable.repartition` before this method to improve
    performance.

    Parameters
    ----------
    y: :class:`.Float64Expression`
        Column-indexed expression for the observations (rows of :math:`y`).
        Must have no missing values.
    x: :obj:`list` of :class:`.Float64Expression`
        Non-empty list of column-indexed expressions for the fixed effects (rows of :math:`X`).
        Each expression must have the same source as `y` or no source
        (e.g., the intercept ``1.0``).
        Must have no missing values.
    z_t: :class:`.Float64Expression`, optional
        Entry-indexed expression for each mixed effect. These values are
        row-standardized to variance :math:`1 / m` to form the entries of
        :math:`Z^T`. If `mean_impute` is false, must have no missing values.
        Exactly one of `z_t` and `k` must be set.
    k: :class:`numpy.ndarray`, optional
        Kinship matrix :math:`K`.
        Exactly one of `z_t` and `k` must be set.
    p_path: :class:`str`, optional
        Path at which to write the projection :math:`P` as a block matrix.
        Required if `z_t` is set.
    overwrite: :obj:`bool`
        If ``True``, overwrite an existing file at `p_path`.
    standardize: :obj:`bool`
        If ``True``, standardize `z_t` by row to mean 0 and variance
        :math:`\frac{1}{m}`.
    mean_impute: :obj:`bool`
        If ``True``, mean-impute missing values of `z_t` by row.

    Returns
    -------
    model: :class:`.LinearMixedModel`
        Linear mixed model ready to be fit.
    p: :class:`numpy.ndarray` or :class:`.BlockMatrix`
        Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
        The type is block matrix if the model is low rank (i.e., if `z_t` is set
        and :math:`n > m`).
    """
    source = matrix_table_source('linear_mixed_model/y', y)

    # Exactly one of the two random-effect specifications may be given.
    if ((z_t is None and k is None)
            or (z_t is not None and k is not None)):
        raise ValueError("linear_mixed_model: set exactly one of 'z_t' and 'k'")

    if len(x) == 0:
        raise ValueError("linear_mixed_model: 'x' must include at least one fixed effect")

    _warn_if_no_intercept('linear_mixed_model', x)

    # collect x and y in one pass
    mt = source.select_cols(xy=hl.array(x + [y])).key_cols_by()
    xy = np.array(mt.xy.collect(), dtype=np.float64)
    # Reshape the flat collected values to one row per sample; the last
    # column is y, the rest are the fixed effects.
    xy = xy.reshape(xy.size // (len(x) + 1), len(x) + 1)
    x_nd = np.copy(xy[:, :-1])
    y_nd = np.copy(xy[:, -1])
    n = y_nd.size
    del xy

    if not np.all(np.isfinite(y_nd)):
        raise ValueError("linear_mixed_model: 'y' has missing, nan, or infinite values")
    if not np.all(np.isfinite(x_nd)):
        raise ValueError("linear_mixed_model: 'x' has missing, nan, or infinite values")

    if z_t is None:
        # Full-rank model from a pre-computed kinship matrix.
        model, p = LinearMixedModel.from_kinship(y_nd, x_nd, k, p_path, overwrite)
    else:
        # Model from random effects: build Z^T as a block matrix.
        check_entry_indexed('from_matrix_table: z_t', z_t)
        if matrix_table_source('linear_mixed_model/z_t', z_t) != source:
            raise ValueError("linear_mixed_model: 'y' and 'z_t' must "
                             "have the same source")
        z_bm = BlockMatrix.from_entry_expr(z_t,
                                           mean_impute=mean_impute,
                                           center=standardize,
                                           normalize=standardize).T  # variance is 1 / n
        m = z_bm.shape[1]
        model, p = LinearMixedModel.from_random_effects(y_nd, x_nd, z_bm, p_path, overwrite)
        if standardize:
            model.s = model.s * (n / m)  # now variance is 1 / m
        if model.low_rank and isinstance(p, np.ndarray):
            # Low-rank models return P from the written block matrix so the
            # caller gets a BlockMatrix handle rather than an in-memory array.
            assert n > m
            p = BlockMatrix.read(p_path)
    return model, p
@typecheck(entry_expr=expr_float64,
           model=LinearMixedModel,
           pa_t_path=nullable(str),
           a_t_path=nullable(str),
           mean_impute=bool,
           partition_size=nullable(int),
           pass_through=sequenceof(oneof(str, Expression)))
def linear_mixed_regression_rows(entry_expr,
                                 model,
                                 pa_t_path=None,
                                 a_t_path=None,
                                 mean_impute=True,
                                 partition_size=None,
                                 pass_through=()):
    """For each row, test an input variable for association using a linear
    mixed model.

    Examples
    --------
    See the example in :meth:`linear_mixed_model` and section below on
    efficiently testing multiple responses or sets of fixed effects.

    Notes
    -----
    See :class:`.LinearMixedModel` for details on the model and notation.

    This method packages up several steps for convenience:

    1. Read the transformation :math:`P` from ``model.p_path``.

    2. Write `entry_expr` at `a_t_path` as the block matrix :math:`A^T` with
       block size that of :math:`P`. The parallelism is ``n_rows / block_size``.

    3. Multiply and write :math:`A^T P^T` at `pa_t_path`. The parallelism is the
       number of blocks in :math:`(PA)^T`, which equals
       ``(n_rows / block_size) * (model.r / block_size)``.

    4. Compute regression results per row with
       :meth:`.LinearMixedModel.fit_alternatives`.
       The parallelism is ``n_rows / partition_size``.

    If `pa_t_path` and `a_t_path` are not set, temporary files are used.

    `entry_expr` may only have missing values if `mean_impute` is true, in
    which case missing values of are set to the row mean. We recommend setting
    `mean_impute` to false if you expect no missing values, both for performance
    and as a sanity check.

    **Efficiently varying the response or set of fixed effects**

    Computing :math:`K`, :math:`P`, :math:`S`, :math:`A^T`, and especially the
    product :math:`(PA)^T` may require significant compute when :math:`n` and/or
    :math:`m` is large. However these quantities are all independent of the
    response :math:`y` or fixed effects :math:`X`! And with the model
    diagonalized, Step 4 above is fast and scalable.

    So having run linear mixed regression once, we can
    compute :math:`h^2` and regression statistics for another response or set of
    fixed effects on the **same samples** at the roughly the speed of
    :func:`.linear_regression_rows`.

    For example, having collected another `y` and `x` as ndarrays, one can
    construct a new linear mixed model directly.

    Supposing the model is full-rank and `p` is an ndarray:

    >>> model = hl.stats.LinearMixedModel(p @ y, p @ x, s) # doctest: +SKIP
    >>> model.fit() # doctest: +SKIP
    >>> result_ht = model.fit_alternatives(pa_t_path) # doctest: +SKIP

    Supposing the model is low-rank and `p` is a block matrix:

    >>> p = BlockMatrix.read(p_path) # doctest: +SKIP
    >>> py, px = (p @ y).to_numpy(), (p @ x).to_numpy() # doctest: +SKIP
    >>> model = LinearMixedModel(py, px, s, y, x) # doctest: +SKIP
    >>> model.fit() # doctest: +SKIP
    >>> result_ht = model.fit_alternatives(pa_t_path, a_t_path) # doctest: +SKIP

    In either case, one can easily loop through many responses or conditional
    analyses. To join results back to the matrix table:

    >>> dataset = dataset.add_row_index() # doctest: +SKIP
    >>> dataset = dataset.annotate_rows(lmmreg=result_ht[dataset.row_idx]) # doctest: +SKIP

    Warning
    -------
    For correct results, the column-index of `entry_expr` must correspond to the
    sample index of the model. This will be true, for example, if `model`
    was created with :func:`.linear_mixed_model` using (a possibly row-filtered
    version of) the source of `entry_expr`, or if `y` and `x` were collected to
    arrays from this source. Hail will raise an error if the number of columns
    does not match ``model.n``, but will not detect, for example, permuted
    samples.

    The warning on :meth:`.BlockMatrix.write_from_entry_expr` applies to this
    method when the number of samples is large.

    Note
    ----
    Use the `pass_through` parameter to include additional row fields from
    matrix table underlying ``entry_expr``. For example, to include an "rsid"
    field, set ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.

    Parameters
    ----------
    entry_expr: :class:`.Float64Expression`
        Entry-indexed expression for input variable.
        If mean_impute is false, must have no missing values.
    model: :class:`.LinearMixedModel`
        Fit linear mixed model with ``path_p`` set.
    pa_t_path: :class:`str`, optional
        Path at which to store the transpose of :math:`PA`.
        If not set, a temporary file is used.
    a_t_path: :class:`str`, optional
        Path at which to store the transpose of :math:`A`.
        If not set, a temporary file is used.
    mean_impute: :obj:`bool`
        Mean-impute missing values of `entry_expr` by row.
    partition_size: :obj:`int`
        Number of rows to process per partition.
        Default given by block size of :math:`P`.
    pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
        Additional row fields to include in the resulting table.

    Returns
    -------
    :class:`.Table`
    """
    mt = matrix_table_source('linear_mixed_regression_rows', entry_expr)
    n = mt.count_cols()

    check_entry_indexed('linear_mixed_regression_rows', entry_expr)
    # The model must be fully fit and must know where P was written.
    if not model._fitted:
        raise ValueError("linear_mixed_regression_rows: 'model' has not been fit "
                         "using 'fit()'")
    if model.p_path is None:
        raise ValueError("linear_mixed_regression_rows: 'model' property 'p_path' "
                         "was not set at initialization")

    # Column count of the entry matrix must match the model's sample count.
    if model.n != n:
        raise ValueError(f"linear_mixed_regression_rows: linear mixed model expects {model.n} samples, "
                         f"\n    but 'entry_expr' source has {n} columns.")

    pa_t_path = new_temp_file() if pa_t_path is None else pa_t_path
    a_t_path = new_temp_file() if a_t_path is None else a_t_path

    # Write A^T with P's block size, then materialize (PA)^T = A^T P^T.
    p = BlockMatrix.read(model.p_path)
    BlockMatrix.write_from_entry_expr(entry_expr,
                                      a_t_path,
                                      mean_impute=mean_impute,
                                      block_size=p.block_size)
    a_t = BlockMatrix.read(a_t_path)
    (a_t @ p.T).write(pa_t_path, force_row_major=True)

    # Per-row regression; A^T is only needed again for low-rank models.
    ht = model.fit_alternatives(pa_t_path,
                                a_t_path if model.low_rank else None,
                                partition_size)

    row_fields = _get_regression_row_fields(mt, pass_through, 'linear_mixed_regression_rows')

    # Join results back to the row key via a temporary row index.
    mt_keys = mt.select_rows(**row_fields).add_row_index('__row_idx').rows().add_index('__row_idx').key_by('__row_idx')
    return mt_keys.annotate(**ht[mt_keys['__row_idx']]).key_by(*mt.row_key).drop('__row_idx')
@typecheck(key_expr=expr_any,
           weight_expr=expr_float64,
           y=expr_float64,
           x=expr_float64,
           covariates=sequenceof(expr_float64),
           logistic=bool,
           max_size=int,
           accuracy=numeric,
           iterations=int)
def skat(key_expr, weight_expr, y, x, covariates, logistic=False,
         max_size=46340, accuracy=1e-6, iterations=10000) -> Table:
    r"""Test each keyed group of rows for association by linear or logistic
    SKAT test.

    Examples
    --------
    Test each gene for association using the linear sequence kernel association
    test:

    >>> skat_table = hl.skat(key_expr=burden_ds.gene,
    ...                      weight_expr=burden_ds.weight,
    ...                      y=burden_ds.burden.pheno,
    ...                      x=burden_ds.GT.n_alt_alleles(),
    ...                      covariates=[1, burden_ds.burden.cov1, burden_ds.burden.cov2])

    .. caution::

       By default, the Davies algorithm iterates up to 10k times until an
       accuracy of 1e-6 is achieved. Hence a reported p-value of zero with no
       issues may truly be as large as 1e-6. The accuracy and maximum number of
       iterations may be controlled by the corresponding function parameters.
       In general, higher accuracy requires more iterations.

    .. caution::

       To process a group with :math:`m` rows, several copies of an
       :math:`m \times m` matrix of doubles must fit in worker memory. Groups
       with tens of thousands of rows may exhaust worker memory causing the
       entire job to fail. In this case, use the `max_size` parameter to skip
       groups larger than `max_size`.

    Warning
    -------
    :func:`.skat` considers the same set of columns (i.e., samples, points) for
    every group, namely those columns for which **all** covariates are defined.
    For each row, missing values of `x` are mean-imputed over these columns.
    As in the example, the intercept covariate ``1`` must be included
    **explicitly** if desired.

    Notes
    -----
    This method provides a scalable implementation of the score-based
    variance-component test originally described in
    `Rare-Variant Association Testing for Sequencing Data with the Sequence Kernel Association Test
    <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3135811/>`__.

    Row weights must be non-negative. Rows with missing weights are ignored. In
    the R package ``skat``---which assumes rows are variants---default weights
    are given by evaluating the Beta(1, 25) density at the minor allele
    frequency. To replicate these weights in Hail using alternate allele
    frequencies stored in a row-indexed field `AF`, one can use the expression:

    >>> hl.dbeta(hl.min(ds2.AF), 1.0, 25.0) ** 2

    In the logistic case, the response `y` must either be numeric (with all
    present values 0 or 1) or Boolean, in which case true and false are coded
    as 1 and 0, respectively.

    The resulting :class:`.Table` provides the group's key (`id`), the number of
    rows in the group (`size`), the variance component score `q_stat`, the SKAT
    `p-value`, and a `fault` flag. For the toy example above, the table has the
    form:

    +-------+------+--------+---------+-------+
    | id    | size | q_stat | p_value | fault |
    +=======+======+========+=========+=======+
    | geneA | 2    | 4.136  | 0.205   | 0     |
    +-------+------+--------+---------+-------+
    | geneB | 1    | 5.659  | 0.195   | 0     |
    +-------+------+--------+---------+-------+
    | geneC | 3    | 4.122  | 0.192   | 0     |
    +-------+------+--------+---------+-------+

    Groups larger than `max_size` appear with missing `q_stat`, `p_value`, and
    `fault`. The hard limit on the number of rows in a group is 46340.

    Note that the variance component score `q_stat` agrees with ``Q`` in the R
    package ``skat``, but both differ from :math:`Q` in the paper by the factor
    :math:`\frac{1}{2\sigma^2}` in the linear case and :math:`\frac{1}{2}` in
    the logistic case, where :math:`\sigma^2` is the unbiased estimator of
    residual variance for the linear null model. The R package also applies a
    "small-sample adjustment" to the null distribution in the logistic case
    when the sample size is less than 2000. Hail does not apply this
    adjustment.

    The fault flag is an integer indicating whether any issues occurred when
    running the Davies algorithm to compute the p-value as the right tail of a
    weighted sum of :math:`\chi^2(1)` distributions.

    +-------------+-----------------------------------------+
    | fault value | Description                             |
    +=============+=========================================+
    | 0           | no issues                               |
    +-------------+-----------------------------------------+
    | 1           | accuracy NOT achieved                   |
    +-------------+-----------------------------------------+
    | 2           | round-off error possibly significant    |
    +-------------+-----------------------------------------+
    | 3           | invalid parameters                      |
    +-------------+-----------------------------------------+
    | 4           | unable to locate integration parameters |
    +-------------+-----------------------------------------+
    | 5           | out of memory                           |
    +-------------+-----------------------------------------+

    Parameters
    ----------
    key_expr : :class:`.Expression`
        Row-indexed expression for key associated to each row.
    weight_expr : :class:`.Float64Expression`
        Row-indexed expression for row weights.
    y : :class:`.Float64Expression`
        Column-indexed response expression.
        If `logistic` is ``True``, all non-missing values must evaluate to 0 or
        1. Note that a :class:`.BooleanExpression` will be implicitly converted
        to a :class:`.Float64Expression` with this property.
    x : :class:`.Float64Expression`
        Entry-indexed expression for input variable.
    covariates : :obj:`list` of :class:`.Float64Expression`
        List of column-indexed covariate expressions.
    logistic : :obj:`bool`
        If true, use the logistic test rather than the linear test.
    max_size : :obj:`int`
        Maximum size of group on which to run the test.
    accuracy : :obj:`float`
        Accuracy achieved by the Davies algorithm if fault value is zero.
    iterations : :obj:`int`
        Maximum number of iterations attempted by the Davies algorithm.

    Returns
    -------
    :class:`.Table`
        Table of SKAT results.
    """
    # Validate index dimensions: key/weight are row-indexed, y/covariates are
    # column-indexed, x is entry-indexed on the same matrix table.
    mt = matrix_table_source('skat/x', x)
    check_entry_indexed('skat/x', x)
    analyze('skat/key_expr', key_expr, mt._row_indices)
    analyze('skat/weight_expr', weight_expr, mt._row_indices)
    analyze('skat/y', y, mt._col_indices)
    # NOTE(review): all_exprs is populated but not read again in this
    # function — looks like a leftover; confirm before removing.
    all_exprs = [key_expr, weight_expr, y]
    for e in covariates:
        all_exprs.append(e)
        analyze('skat/covariates', e, mt._col_indices)
    _warn_if_no_intercept('skat', covariates)
    # FIXME: remove this logic when annotation is better optimized
    # Reuse x's existing field if it already is one; otherwise materialize it
    # under a fresh unique field name.
    if x in mt._fields_inverse:
        x_field_name = mt._fields_inverse[x]
        entry_expr = {}
    else:
        x_field_name = Env.get_uid()
        entry_expr = {x_field_name: x}
    # Rebind all inputs to fixed private field names so the backend config
    # below can reference them by name.
    y_field_name = '__y'
    weight_field_name = '__weight'
    key_field_name = '__key'
    cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
    mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
                                      **dict(zip(cov_field_names, covariates))),
                        row_exprs={weight_field_name: weight_expr,
                                   key_field_name: key_expr},
                        entry_exprs=entry_expr)
    # Dispatch the actual SKAT computation to the backend via a
    # MatrixToTableApply IR node.
    config = {
        'name': 'Skat',
        'keyField': key_field_name,
        'weightField': weight_field_name,
        'xField': x_field_name,
        'yField': y_field_name,
        'covFields': cov_field_names,
        'logistic': logistic,
        'maxSize': max_size,
        'accuracy': accuracy,
        'iterations': iterations
    }
    return Table(ir.MatrixToTableApply(mt._mir, config))
@typecheck(p_value=expr_numeric,
           approximate=bool)
def lambda_gc(p_value, approximate=True):
    """Compute the genomic inflation factor (lambda GC) from an Expression
    of p-values.

    .. include:: ../_templates/experimental.rst

    Parameters
    ----------
    p_value : :class:`.NumericExpression`
        Row-indexed numeric expression of p-values.
    approximate : :obj:`bool`
        If False, computes exact lambda GC (slower and uses more memory).

    Returns
    -------
    :obj:`float`
        Genomic inflation factor (lambda genomic control).
    """
    check_row_indexed('lambda_gc', p_value)
    source_table = table_source('lambda_gc', p_value)
    # Build the median-chi-squared aggregation, then evaluate it over the
    # table the p-values come from.
    lambda_agg = _lambda_gc_agg(p_value, approximate)
    return source_table.aggregate(lambda_agg)
@typecheck(p_value=expr_numeric,
           approximate=bool)
def _lambda_gc_agg(p_value, approximate=True):
    """Aggregation expression for lambda GC: the median observed
    chi-squared statistic over the theoretical median under the null."""
    observed = hl.qchisqtail(p_value, 1)
    # Median of the observed statistics, skipping rows with NaN p-values;
    # the approximate path trades exactness for speed and memory.
    if approximate:
        median_expr = hl.agg.approx_quantiles(observed, 0.5)
    else:
        median_expr = hl.median(hl.agg.collect(observed))
    med_observed = hl.agg.filter(~hl.is_nan(p_value), median_expr)
    return med_observed / hl.qchisqtail(0.5, 1)
@typecheck(ds=oneof(Table, MatrixTable),
           keep_star=bool,
           left_aligned=bool,
           permit_shuffle=bool)
def split_multi(ds, keep_star=False, left_aligned=False, *, permit_shuffle=False):
    """Split multiallelic variants.

    Warning
    -------
    In order to support a wide variety of data types, this function splits only
    the variants on a :class:`.MatrixTable`, but **not the genotypes**. Use
    :func:`.split_multi_hts` if possible, or split the genotypes yourself using
    one of the entry modification methods: :meth:`.MatrixTable.annotate_entries`,
    :meth:`.MatrixTable.select_entries`, :meth:`.MatrixTable.transmute_entries`.

    The resulting dataset will be keyed by the split locus and alleles.

    :func:`.split_multi` adds the following fields:

     - `was_split` (*bool*) -- ``True`` if this variant was originally
       multiallelic, otherwise ``False``.

     - `a_index` (*int*) -- The original index of this alternate allele in the
       multiallelic representation (NB: 1 is the first alternate allele or the
       only alternate allele in a biallelic variant). For example, 1:100:A:T,C
       splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
       with ``a_index = 2``.

     - `old_locus` (*locus*) -- The original, unsplit locus.

     - `old_alleles` (*array<str>*) -- The original, unsplit alleles.

    All other fields are left unchanged.

    Warning
    -------
    This method assumes `ds` contains one non-split variant per locus. This assumption permits the
    most efficient implementation of this method.

    If each locus in `ds` contains one multiallelic variant and one or more biallelic variants, you
    can filter to the multiallelic variants, split those, and then combine the split variants with
    the original biallelic variants.

    For example, the following code splits a dataset `mt` which contains a mixture of split and
    non-split variants.

    >>> bi = mt.filter_rows(hl.len(mt.alleles) == 2)
    >>> bi = bi.annotate_rows(was_split=False)
    >>> multi = mt.filter_rows(hl.len(mt.alleles) > 2)
    >>> split = hl.split_multi_hts(multi)
    >>> mt = split.union_rows(bi)

    Example
    -------
    :func:`.split_multi_hts`, which splits multiallelic variants for the HTS
    genotype schema and updates the entry fields by downcoding the genotype, is
    implemented as:

    >>> sm = hl.split_multi(ds)
    >>> pl = hl.or_missing(
    ...      hl.is_defined(sm.PL),
    ...      (hl.range(0, 3).map(lambda i: hl.min(hl.range(0, hl.len(sm.PL))
    ...                     .filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j), sm.a_index) == hl.unphased_diploid_gt_index_call(i))
    ...                     .map(lambda j: sm.PL[j])))))
    >>> split_ds = sm.annotate_entries(
    ...     GT=hl.downcode(sm.GT, sm.a_index),
    ...     AD=hl.or_missing(hl.is_defined(sm.AD),
    ...                     [hl.sum(sm.AD) - sm.AD[sm.a_index], sm.AD[sm.a_index]]),
    ...     DP=sm.DP,
    ...     PL=pl,
    ...     GQ=hl.gq_from_pl(pl)).drop('old_locus', 'old_alleles')

    See Also
    --------
    :func:`.split_multi_hts`

    Parameters
    ----------
    ds : :class:`.MatrixTable` or :class:`.Table`
        An unsplit dataset.
    keep_star : :obj:`bool`
        Do not filter out * alleles.
    left_aligned : :obj:`bool`
        If ``True``, variants are assumed to be left aligned and have unique
        loci. This avoids a shuffle. If the assumption is violated, an error
        is generated.
    permit_shuffle : :obj:`bool`
        If ``True``, permit a data shuffle to sort out-of-order split results.
        This will only be required if input data has duplicate loci, one of
        which contains more than one alternate allele.

    Returns
    -------
    :class:`.MatrixTable` or :class:`.Table`
    """
    require_row_key_variant(ds, "split_multi")
    # Unique temporary field name to hold the array of split-variant structs.
    new_id = Env.get_uid()
    is_table = isinstance(ds, Table)
    old_row = ds.row if is_table else ds._rvrow
    # Indices of the alternate alleles to keep (1..n_alleles-1), optionally
    # excluding '*' (spanning-deletion) alleles.
    kept_alleles = hl.range(1, hl.len(old_row.alleles))
    if not keep_star:
        kept_alleles = kept_alleles.filter(lambda i: old_row.alleles[i] != "*")

    def new_struct(variant, i):
        # Payload for one biallelic split: the min-repped locus/alleles of
        # allele i plus the bookkeeping fields documented above.
        return hl.struct(alleles=variant.alleles,
                         locus=variant.locus,
                         a_index=i,
                         was_split=hl.len(old_row.alleles) > 2)

    def split_rows(expr, rekey):
        # Explode the array of split structs into rows, then rewrite the row
        # key fields. When `rekey` is true a full re-key (shuffle) is allowed;
        # otherwise the result is declared already sorted by (locus, alleles).
        if isinstance(ds, MatrixTable):
            mt = (ds.annotate_rows(**{new_id: expr})
                  .explode_rows(new_id))
            if rekey:
                mt = mt.key_rows_by()
            else:
                mt = mt.key_rows_by('locus')
            new_row_expr = mt._rvrow.annotate(locus=mt[new_id]['locus'],
                                              alleles=mt[new_id]['alleles'],
                                              a_index=mt[new_id]['a_index'],
                                              was_split=mt[new_id]['was_split'],
                                              old_locus=mt.locus,
                                              old_alleles=mt.alleles).drop(new_id)
            mt = mt._select_rows('split_multi', new_row_expr)
            if rekey:
                return mt.key_rows_by('locus', 'alleles')
            else:
                # is_sorted=True avoids a shuffle; valid because splits of a
                # single variant are locally sorted below.
                return MatrixTable(ir.MatrixKeyRowsBy(mt._mir, ['locus', 'alleles'], is_sorted=True))
        else:
            assert isinstance(ds, Table)
            ht = (ds.annotate(**{new_id: expr})
                  .explode(new_id))
            if rekey:
                ht = ht.key_by()
            else:
                ht = ht.key_by('locus')
            new_row_expr = ht.row.annotate(locus=ht[new_id]['locus'],
                                           alleles=ht[new_id]['alleles'],
                                           a_index=ht[new_id]['a_index'],
                                           was_split=ht[new_id]['was_split'],
                                           old_locus=ht.locus,
                                           old_alleles=ht.alleles).drop(new_id)
            ht = ht._select('split_multi', new_row_expr)
            if rekey:
                return ht.key_by('locus', 'alleles')
            else:
                return Table(ir.TableKeyBy(ht._tir, ['locus', 'alleles'], is_sorted=True))

    if left_aligned:
        # Fast path: trust that min_rep never moves the locus; raise if it does.
        def make_struct(i):
            def error_on_moved(v):
                return (hl.case()
                        .when(v.locus == old_row.locus, new_struct(v, i))
                        .or_error("Found non-left-aligned variant in split_multi"))
            return hl.bind(error_on_moved,
                           hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))
        return split_rows(hl.sorted(kept_alleles.map(make_struct)), permit_shuffle)
    else:
        # General path: split variants whose min-repped locus stays put
        # separately from those that move, then union the two results
        # (the moved part always requires a re-key).
        def make_struct(i, cond):
            def struct_or_empty(v):
                return (hl.case()
                        .when(cond(v.locus), hl.array([new_struct(v, i)]))
                        .or_missing())
            return hl.bind(struct_or_empty,
                           hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))

        def make_array(cond):
            return hl.sorted(kept_alleles.flatmap(lambda i: make_struct(i, cond)))

        left = split_rows(make_array(lambda locus: locus == ds['locus']), permit_shuffle)
        moved = split_rows(make_array(lambda locus: locus != ds['locus']), True)
        return left.union(moved) if is_table else left.union_rows(moved, _check_cols=False)
@typecheck(ds=oneof(Table, MatrixTable),
           keep_star=bool,
           left_aligned=bool,
           vep_root=str,
           permit_shuffle=bool)
def split_multi_hts(ds, keep_star=False, left_aligned=False, vep_root='vep', *, permit_shuffle=False):
    """Split multiallelic variants for datasets that contain one or more fields
    from a standard high-throughput sequencing entry schema.

    .. code-block:: text

      struct {
        GT: call,
        AD: array<int32>,
        DP: int32,
        GQ: int32,
        PL: array<int32>,
        PGT: call,
        PID: str
      }

    For other entry fields, write your own splitting logic using
    :meth:`.MatrixTable.annotate_entries`.

    Examples
    --------
    >>> hl.split_multi_hts(dataset).write('output/split.mt')

    Warning
    -------
    This method assumes `ds` contains one non-split variant per locus. This assumption permits the
    most efficient implementation of this method.

    If each locus in `ds` contains one multiallelic variant and one or more biallelic variants, you
    can filter to the multiallelic variants, split those, and then combine the split variants with
    the original biallelic variants.

    For example, the following code splits a dataset `mt` which contains a mixture of split and
    non-split variants.

    >>> bi = mt.filter_rows(hl.len(mt.alleles) == 2)
    >>> bi = bi.annotate_rows(was_split=False)
    >>> multi = mt.filter_rows(hl.len(mt.alleles) > 2)
    >>> split = hl.split_multi_hts(multi)
    >>> mt = split.union_rows(bi)

    Notes
    -----
    We will explain by example. Consider a hypothetical 3-allelic
    variant:

    .. code-block:: text

      A   C,T 0/2:7,2,6:15:45:99,50,99,0,45,99

    :func:`.split_multi_hts` will create two biallelic variants (one for each
    alternate allele) at the same position

    .. code-block:: text

      A   C   0/0:13,2:15:45:0,45,99
      A   T   0/1:9,6:15:50:50,0,99

    Each multiallelic `GT` or `PGT` field is downcoded once for each alternate allele. A
    call for an alternate allele maps to 1 in the biallelic variant
    corresponding to itself and 0 otherwise. For example, in the example above,
    0/2 maps to 0/0 and 0/1. The genotype 1/2 maps to 0/1 and 0/1.

    The biallelic alt `AD` entry is just the multiallelic `AD` entry
    corresponding to the alternate allele. The ref AD entry is the sum of the
    other multiallelic entries.

    The biallelic `DP` is the same as the multiallelic `DP`.

    The biallelic `PL` entry for a genotype g is the minimum over `PL` entries
    for multiallelic genotypes that downcode to g. For example, the `PL` for (A,
    T) at 0/1 is the minimum of the PLs for 0/1 (50) and 1/2 (45), and thus 45.

    Fixing an alternate allele and biallelic variant, downcoding gives a map
    from multiallelic to biallelic alleles and genotypes. The biallelic `AD` entry
    for an allele is just the sum of the multiallelic `AD` entries for alleles
    that map to that allele. Similarly, the biallelic `PL` entry for a genotype is
    the minimum over multiallelic `PL` entries for genotypes that map to that
    genotype.

    `GQ` is recomputed from `PL` if `PL` is provided and is not
    missing. If not, it is copied from the original GQ.

    Here is a second example for a het non-ref

    .. code-block:: text

      A   C,T 1/2:2,8,6:16:45:99,50,99,45,0,99

    splits as

    .. code-block:: text

      A   C   0/1:8,8:16:45:45,0,99
      A   T   0/1:10,6:16:50:50,0,99

    **VCF Info Fields**

    Hail does not split fields in the info field. This means that if a
    multiallelic site with `info.AC` value ``[10, 2]`` is split, each split
    site will contain the same array ``[10, 2]``. The provided allele index
    field `a_index` can be used to select the value corresponding to the split
    allele's position:

    >>> split_ds = hl.split_multi_hts(dataset)
    >>> split_ds = split_ds.filter_rows(split_ds.info.AC[split_ds.a_index - 1] < 10,
    ...                                 keep = False)

    VCFs split by Hail and exported to new VCFs may be
    incompatible with other tools, if action is not taken
    first. Since the "Number" of the arrays in split multiallelic
    sites no longer matches the structure on import ("A" for 1 per
    allele, for example), Hail will export these fields with
    number ".".

    If the desired output is one value per site, then it is
    possible to use annotate_variants_expr to remap these
    values. Here is an example:

    >>> split_ds = hl.split_multi_hts(dataset)
    >>> split_ds = split_ds.annotate_rows(info = split_ds.info.annotate(AC = split_ds.info.AC[split_ds.a_index - 1]))
    >>> hl.export_vcf(split_ds, 'output/export.vcf') # doctest: +SKIP

    The info field AC in *data/export.vcf* will have ``Number=1``.

    **New Fields**

    :func:`.split_multi_hts` adds the following fields:

     - `was_split` (*bool*) -- ``True`` if this variant was originally
       multiallelic, otherwise ``False``.

     - `a_index` (*int*) -- The original index of this alternate allele in the
       multiallelic representation (NB: 1 is the first alternate allele or the
       only alternate allele in a biallelic variant). For example, 1:100:A:T,C
       splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
       with ``a_index = 2``.

    See Also
    --------
    :func:`.split_multi`

    Parameters
    ----------
    ds : :class:`.MatrixTable` or :class:`.Table`
        An unsplit dataset.
    keep_star : :obj:`bool`
        Do not filter out * alleles.
    left_aligned : :obj:`bool`
        If ``True``, variants are assumed to be left
        aligned and have unique loci. This avoids a shuffle. If the assumption
        is violated, an error is generated.
    vep_root : :class:`str`
        Top-level location of vep data. All variable-length VEP fields
        (intergenic_consequences, motif_feature_consequences,
        regulatory_feature_consequences, and transcript_consequences)
        will be split properly (i.e. a_index corresponding to the VEP allele_num).
    permit_shuffle : :obj:`bool`
        If ``True``, permit a data shuffle to sort out-of-order split results.
        This will only be required if input data has duplicate loci, one of
        which contains more than one alternate allele.

    Returns
    -------
    :class:`.MatrixTable` or :class:`.Table`
        A biallelic variant dataset.
    """
    # Split the rows first; entry downcoding happens below on the result.
    split = split_multi(ds, keep_star=keep_star, left_aligned=left_aligned, permit_shuffle=permit_shuffle)
    row_fields = set(ds.row)
    update_rows_expression = {}
    if vep_root in row_fields:
        # Split VEP annotations: keep only consequence records whose
        # allele_num matches this split allele's a_index.
        update_rows_expression[vep_root] = split[vep_root].annotate(**{
            x: split[vep_root][x].filter(lambda csq: csq.allele_num == split.a_index)
            for x in ('intergenic_consequences', 'motif_feature_consequences',
                      'regulatory_feature_consequences', 'transcript_consequences')})
    if isinstance(ds, Table):
        # Tables have no entries; only row annotations need updating.
        return split.annotate(**update_rows_expression).drop('old_locus', 'old_alleles')
    split = split.annotate_rows(**update_rows_expression)
    entry_fields = ds.entry
    # Entry fields recognized by the HTS schema and the types they must have.
    expected_field_types = {
        'GT': hl.tcall,
        'AD': hl.tarray(hl.tint),
        'DP': hl.tint,
        'GQ': hl.tint,
        'PL': hl.tarray(hl.tint),
        'PGT': hl.tcall,
        'PID': hl.tstr
    }
    # Fail fast with a readable message if any HTS field has the wrong type.
    bad_fields = []
    for field in entry_fields:
        if field in expected_field_types and entry_fields[field].dtype != expected_field_types[field]:
            bad_fields.append((field, entry_fields[field].dtype, expected_field_types[field]))
    if bad_fields:
        msg = '\n '.join([f"'{x[0]}'\tfound: {x[1]}\texpected: {x[2]}" for x in bad_fields])
        raise TypeError("'split_multi_hts': Found invalid types for the following fields:\n " + msg)
    update_entries_expression = {}
    if 'GT' in entry_fields:
        update_entries_expression['GT'] = hl.downcode(split.GT, split.a_index)
    if 'DP' in entry_fields:
        update_entries_expression['DP'] = split.DP
    if 'AD' in entry_fields:
        # Alt AD is the entry for this allele; ref AD is the sum of the rest.
        update_entries_expression['AD'] = hl.or_missing(hl.is_defined(split.AD),
                                                        [hl.sum(split.AD) - split.AD[split.a_index], split.AD[split.a_index]])
    if 'PL' in entry_fields:
        # New PL[i] = min over original PLs of multiallelic genotypes that
        # downcode to biallelic genotype i (0/0, 0/1, 1/1).
        pl = hl.or_missing(
            hl.is_defined(split.PL),
            (hl.range(0, 3).map(lambda i:
                                hl.min((hl.range(0, hl.triangle(split.old_alleles.length()))
                                        .filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j),
                                                                      split.a_index).unphased_diploid_gt_index() == i
                                                ).map(lambda j: split.PL[j]))))))
        if 'GQ' in entry_fields:
            update_entries_expression['PL'] = pl
            # Recompute GQ from the split PL; fall back to original GQ when
            # PL is missing.
            update_entries_expression['GQ'] = hl.or_else(hl.gq_from_pl(pl), split.GQ)
        else:
            update_entries_expression['PL'] = pl
    else:
        if 'GQ' in entry_fields:
            update_entries_expression['GQ'] = split.GQ
    if 'PGT' in entry_fields:
        update_entries_expression['PGT'] = hl.downcode(split.PGT, split.a_index)
    if 'PID' in entry_fields:
        update_entries_expression['PID'] = split.PID
    return split.annotate_entries(**update_entries_expression).drop('old_locus', 'old_alleles')
@typecheck(call_expr=expr_call)
def genetic_relatedness_matrix(call_expr) -> BlockMatrix:
    r"""Compute the genetic relatedness matrix (GRM).

    Examples
    --------
    >>> grm = hl.genetic_relatedness_matrix(dataset.GT)

    Notes
    -----
    The GRM :math:`G = MM^T` encodes genetic correlation between each pair of
    samples, where :math:`M` is a standardized version of the genotype matrix.
    Let :math:`C` be the :math:`n \times m` raw genotype matrix with rows
    indexed by :math:`n` samples and columns indexed by :math:`m` biallelic
    autosomal variants, :math:`C_{ij}` being the number of alternate alleles
    of variant :math:`j` carried by sample :math:`i` (0, 1, 2, or missing).
    With :math:`p_j` the sample alternate allele frequency of variant
    :math:`j` (half the mean of the non-missing entries of column :math:`j`),
    entries of :math:`M` are mean-centered and variance-normalized as

    .. math::

        M_{ij} = \frac{C_{ij}-2p_j}{\sqrt{2p_j(1-p_j)m}},

    with :math:`M_{ij} = 0` where :math:`C_{ij}` is missing (mean genotype
    imputation). This scaling gives variants in Hardy-Weinberg equilibrium a
    common variance :math:`1/m`, so sample rows have approximately unit total
    variance (assuming linkage equilibrium) and diagonal entries of the GRM
    are approximately 1; see `Patterson, Price and Reich, 2006
    <http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.0020190>`__
    for motivation. (The resulting amplification of signal from the low end
    of the allele frequency spectrum will also introduce noise for rare
    variants; common practice is to filter out variants with minor allele
    frequency below some cutoff.) Equivalently,

    .. math::

        G_{ik} = \frac{1}{m} \sum_{j=1}^m \frac{(C_{ij}-2p_j)(C_{kj}-2p_j)}{2 p_j (1-p_j)}

    Variants with :math:`p_j = 0` or :math:`p_j = 1` are dropped before
    computing kinship.

    Parameters
    ----------
    call_expr : :class:`.CallExpression`
        Entry-indexed call expression with columns corresponding
        to samples.

    Returns
    -------
    :class:`.BlockMatrix`
        Genetic relatedness matrix for all samples. Row and column indices
        correspond to matrix table column index.
    """
    dataset = matrix_table_source('genetic_relatedness_matrix/call_expr', call_expr)
    check_entry_indexed('genetic_relatedness_matrix/call_expr', call_expr)

    # Alternate-allele dosage per entry; unfilter so filtered entries take
    # part in the per-row aggregations below as missing values.
    dataset = dataset.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
    dataset = dataset.select_rows(__AC=agg.sum(dataset.__gt),
                                  __n_called=agg.count_where(hl.is_defined(dataset.__gt)))
    # Drop monomorphic variants (allele frequency 0 or 1).
    dataset = dataset.filter_rows((dataset.__AC > 0) & (dataset.__AC < 2 * dataset.__n_called))
    dataset = dataset.select_rows(__mean_gt=dataset.__AC / dataset.__n_called)
    dataset = dataset.annotate_rows(
        __hwe_scaled_std_dev=hl.sqrt(dataset.__mean_gt * (2 - dataset.__mean_gt)))

    # Center and scale each dosage; missing entries become 0 (mean imputation).
    standardized_gt = hl.or_else(
        (dataset.__gt - dataset.__mean_gt) / dataset.__hwe_scaled_std_dev, 0.0)
    block_mat = BlockMatrix.from_entry_expr(standardized_gt)
    # block_mat is variants-by-samples (n_rows == m), so G = B^T B / (m / 2).
    return (block_mat.T @ block_mat) / (block_mat.n_rows / 2.0)
@typecheck(call_expr=expr_call)
def realized_relationship_matrix(call_expr) -> BlockMatrix:
    r"""Computes the realized relationship matrix (RRM).

    Examples
    --------
    >>> rrm = hl.realized_relationship_matrix(dataset.GT)

    Notes
    -----
    The realized relationship matrix (RRM) is defined as follows. Consider the
    :math:`n \times m` matrix :math:`C` of raw genotypes, with rows indexed by
    :math:`n` samples and columns indexed by the :math:`m` bialellic autosomal
    variants; :math:`C_{ij}` is the number of alternate alleles of variant
    :math:`j` carried by sample :math:`i`, which can be 0, 1, 2, or missing. For
    each variant :math:`j`, the sample alternate allele frequency :math:`p_j` is
    computed as half the mean of the non-missing entries of column :math:`j`.
    Entries of :math:`M` are then mean-centered and variance-normalized as

    .. math::

        M_{ij} =
          \frac{C_{ij}-2p_j}
                {\sqrt{\frac{m}{n} \sum_{k=1}^n (C_{kj}-2p_j)^2}},

    with :math:`M_{ij} = 0` for :math:`C_{ij}` missing (i.e. mean genotype
    imputation). This scaling normalizes each variant column to have empirical
    variance :math:`1/m`, which gives each sample row approximately unit total
    variance (assuming linkage equilibrium) and yields the :math:`n \times n`
    sample correlation or realized relationship matrix (RRM) :math:`K` as simply

    .. math::

        K = MM^T

    Note that the only difference between the realized relationship matrix and
    the genetic relatedness matrix (GRM) used in
    :func:`.genetic_relatedness_matrix` is the variant (column) normalization:
    where RRM uses empirical variance, GRM uses expected variance under
    Hardy-Weinberg Equilibrium.

    This method drops variants with zero variance before computing kinship.

    Parameters
    ----------
    call_expr : :class:`.CallExpression`
        Entry-indexed call expression on matrix table with columns corresponding
        to samples.

    Returns
    -------
    :class:`.BlockMatrix`
        Realized relationship matrix for all samples. Row and column indices
        correspond to matrix table column index.
    """
    mt = matrix_table_source('realized_relationship_matrix/call_expr', call_expr)
    check_entry_indexed('realized_relationship_matrix/call_expr', call_expr)
    # Alternate-allele dosage per entry; unfilter so filtered entries count
    # as missing in the per-row aggregations below.
    mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
    # Per-variant allele count, sum of squares, and call count.
    mt = mt.select_rows(__AC=agg.sum(mt.__gt),
                        __ACsq=agg.sum(mt.__gt * mt.__gt),
                        __n_called=agg.count_where(hl.is_defined(mt.__gt)))
    # __centered_length is the Euclidean norm of the mean-centered dosage
    # vector: sqrt(sum(gt^2) - (sum(gt))^2 / n_called).
    mt = mt.select_rows(__mean_gt=mt.__AC / mt.__n_called,
                        __centered_length=hl.sqrt(mt.__ACsq - (mt.__AC ** 2) / mt.__n_called))
    fmt = mt.filter_rows(mt.__centered_length > 0.1)  # truly non-zero values are at least sqrt(0.5)
    # Center and scale; missing entries become 0 (mean imputation).
    normalized_gt = hl.or_else((fmt.__gt - fmt.__mean_gt) / fmt.__centered_length, 0.0)
    try:
        bm = BlockMatrix.from_entry_expr(normalized_gt)
        # bm is variants-by-samples (n_rows == m, n_cols == n), so
        # K = M M^T = B^T B / (m / n).
        return (bm.T @ bm) / (bm.n_rows / bm.n_cols)
    except FatalError as fe:
        raise FatalError("Could not convert MatrixTable to BlockMatrix. It's possible all variants were dropped by variance filter.\n"
                         "Check that the input MatrixTable has at least two samples in it:  mt.count_cols().") from fe
@typecheck(entry_expr=expr_float64, block_size=nullable(int))
def row_correlation(entry_expr, block_size=None) -> BlockMatrix:
    """Computes the correlation matrix between row vectors.

    Each row of entries is regarded as a vector with elements defined by
    `entry_expr` and missing values mean-imputed per row. The ``(i, j)``
    element of the result is the Pearson correlation between rows ``i`` and
    ``j`` (0-indexed by order in the matrix table; see
    :meth:`~hail.MatrixTable.add_row_index`), equivalently the cosine of the
    angle between the centered, normalized row vectors.

    Warning
    -------
    Rows with a constant value (i.e., zero variance) will result `nan`
    correlation values. To avoid this, first check that all rows vary or
    filter out constant rows (for example, with the help of
    :func:`.aggregators.stats`).

    The result has as many elements as the square of the number of rows in
    the matrix table, so computing the full matrix may be infeasible; for
    large inputs consider writing the row-normalized block matrix with
    :meth:`.BlockMatrix.write_from_entry_expr` and multiplying in a second
    step, and see all warnings on :meth:`.BlockMatrix.from_entry_expr`. To
    prevent excessive re-computation, be sure to write and read the (possibly
    block-sparsified) result before multiplication by another matrix.

    Parameters
    ----------
    entry_expr : :class:`.Float64Expression`
        Entry-indexed numeric expression on matrix table.
    block_size : :obj:`int`, optional
        Block size. Default given by :meth:`.BlockMatrix.default_block_size`.

    Returns
    -------
    :class:`.BlockMatrix`
        Correlation matrix between row vectors. Row and column indices
        correspond to matrix table row index.
    """
    # Mean-impute, center, and L2-normalize each row, so the Gram matrix of
    # the normalized rows is exactly the Pearson correlation matrix.
    normalized = BlockMatrix.from_entry_expr(entry_expr,
                                             mean_impute=True,
                                             center=True,
                                             normalize=True,
                                             block_size=block_size)
    return normalized @ normalized.T
@typecheck(entry_expr=expr_float64,
           locus_expr=expr_locus(),
           radius=oneof(int, float),
           coord_expr=nullable(expr_float64),
           block_size=nullable(int))
def ld_matrix(entry_expr, locus_expr, radius, coord_expr=None, block_size=None) -> BlockMatrix:
    """Computes the windowed correlation (linkage disequilibrium) matrix between
    variants.
    Examples
    --------
    Consider the following dataset consisting of three variants with centimorgan
    coordinates and four samples:
    >>> data = [{'v': '1:1:A:C', 'cm': 0.1, 's': 'a', 'GT': hl.Call([0, 0])},
    ...         {'v': '1:1:A:C', 'cm': 0.1, 's': 'b', 'GT': hl.Call([0, 0])},
    ...         {'v': '1:1:A:C', 'cm': 0.1, 's': 'c', 'GT': hl.Call([0, 1])},
    ...         {'v': '1:1:A:C', 'cm': 0.1, 's': 'd', 'GT': hl.Call([1, 1])},
    ...         {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'a', 'GT': hl.Call([0, 1])},
    ...         {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'b', 'GT': hl.Call([1, 1])},
    ...         {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'c', 'GT': hl.Call([0, 1])},
    ...         {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'd', 'GT': hl.Call([0, 0])},
    ...         {'v': '2:1:C:G', 'cm': 0.2, 's': 'a', 'GT': hl.Call([0, 1])},
    ...         {'v': '2:1:C:G', 'cm': 0.2, 's': 'b', 'GT': hl.Call([0, 0])},
    ...         {'v': '2:1:C:G', 'cm': 0.2, 's': 'c', 'GT': hl.Call([1, 1])},
    ...         {'v': '2:1:C:G', 'cm': 0.2, 's': 'd', 'GT': hl.missing(hl.tcall)}]
    >>> ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, cm: float64, GT: call}'))
    >>> ht = ht.transmute(**hl.parse_variant(ht.v))
    >>> mt = ht.to_matrix_table(row_key=['locus', 'alleles'], col_key=['s'], row_fields=['cm'])
    Compute linkage disequilibrium between all pairs of variants on the same
    contig and within two megabases:
    >>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=2e6)
    >>> ld.to_numpy()
    array([[ 1.        , -0.85280287,  0.        ],
           [-0.85280287,  1.        ,  0.        ],
           [ 0.        ,  0.        ,  1.        ]])
    Within one megabase:
    >>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1e6)
    >>> ld.to_numpy()
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    Within one centimorgan:
    >>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
    >>> ld.to_numpy()
    array([[ 1.        , -0.85280287,  0.        ],
           [-0.85280287,  1.        ,  0.        ],
           [ 0.        ,  0.        ,  1.        ]])
    Within one centimorgan, and only calculate the upper triangle:
    >>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
    >>> ld = ld.sparsify_triangle()
    >>> ld.to_numpy()
    array([[ 1.        , -0.85280287,  0.        ],
           [ 0.        ,  1.        ,  0.        ],
           [ 0.        ,  0.        ,  1.        ]])
    Notes
    -----
    This method sparsifies the result of :meth:`row_correlation` using
    :func:`.linalg.utils.locus_windows` and
    :meth:`.BlockMatrix.sparsify_row_intervals`
    in order to only compute linkage disequilibrium between nearby
    variants. Use :meth:`row_correlation` directly to calculate correlation
    without windowing.
    More precisely, variants are 0-indexed by their order in the matrix table
    (see :meth:`~hail.MatrixTable.add_row_index`). Each variant is regarded as a vector of
    elements defined by `entry_expr`, typically the number of alternate alleles
    or genotype dosage. Missing values are mean-imputed within variant.
    The method produces a symmetric block-sparse matrix supported in a
    neighborhood of the diagonal. If variants :math:`i` and :math:`j` are on the
    same contig and within `radius` base pairs (inclusive) then the
    :math:`(i, j)` element is their
    `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`__.
    Otherwise, the :math:`(i, j)` element is ``0.0``.
    Rows with a constant value (i.e., zero variance) will result in ``nan``
    correlation values. To avoid this, first check that all variants vary or
    filter out constant variants (for example, with the help of
    :func:`.aggregators.stats`).
    If the :meth:`.global_position` on `locus_expr` is not in ascending order,
    this method will fail. Ascending order should hold for a matrix table keyed
    by locus or variant (and the associated row table), or for a table that's
    been ordered by `locus_expr`.
    Set `coord_expr` to use a value other than position to define the windows.
    This row-indexed numeric expression must be non-missing, non-``nan``, on the
    same source as `locus_expr`, and ascending with respect to locus
    position for each contig; otherwise the method will raise an error.
    Warning
    -------
    See the warnings in :meth:`row_correlation`. In particular, for large
    matrices it may be preferable to run its stages separately.
    `entry_expr` and `locus_expr` are implicitly aligned by row-index, though
    they need not be on the same source. If their sources differ in the number
    of rows, an error will be raised; otherwise, unintended misalignment may
    silently produce unexpected results.
    Parameters
    ----------
    entry_expr : :class:`.Float64Expression`
        Entry-indexed numeric expression on matrix table.
    locus_expr : :class:`.LocusExpression`
        Row-indexed locus expression on a table or matrix table that is
        row-aligned with the matrix table of `entry_expr`.
    radius: :obj:`int` or :obj:`float`
        Radius of window for row values.
    coord_expr: :class:`.Float64Expression`, optional
        Row-indexed numeric expression for the row value on the same table or
        matrix table as `locus_expr`.
        By default, the row value is given by the locus position.
    block_size : :obj:`int`, optional
        Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
    Returns
    -------
    :class:`.BlockMatrix`
        Windowed correlation matrix between variants.
        Row and column indices correspond to matrix table variant index.
    """
    # Per-row window [start, stop) over variant indices, as Hail expressions.
    windows = hl.linalg.utils.locus_windows(locus_expr, radius, coord_expr, _localize=False)
    # The sparsification primitive takes 64-bit interval bounds, so widen
    # both the start and stop arrays explicitly.
    bounds = hl.tuple([windows[0].map(lambda idx: hl.int64(idx)),
                       windows[1].map(lambda idx: hl.int64(idx))])
    # Full correlation matrix, then zero out everything outside the windows.
    correlation = hl.row_correlation(entry_expr, block_size)
    return correlation._sparsify_row_intervals_expr(bounds, blocks_only=False)
@typecheck(n_populations=int,
           n_samples=int,
           n_variants=int,
           n_partitions=nullable(int),
           pop_dist=nullable(sequenceof(numeric)),
           fst=nullable(sequenceof(numeric)),
           af_dist=nullable(expr_any),
           reference_genome=reference_genome_type,
           mixture=bool)
def balding_nichols_model(n_populations, n_samples, n_variants, n_partitions=None,
                          pop_dist=None, fst=None, af_dist=None,
                          reference_genome='default', mixture=False) -> MatrixTable:
    r"""Generate a matrix table of variants, samples, and genotypes using the
    Balding-Nichols or Pritchard-Stephens-Donnelly model.
    Examples
    --------
    Generate a matrix table of genotypes with 1000 variants and 100 samples
    across 3 populations:
    >>> bn_ds = hl.balding_nichols_model(3, 100, 1000, reference_genome='GRCh37')
    Generate a matrix table using 4 populations, 40 samples, 150 variants, 3
    partitions, population distribution ``[0.1, 0.2, 0.3, 0.4]``,
    :math:`F_{ST}` values ``[.02, .06, .04, .12]``, ancestral allele
    frequencies drawn from a truncated beta distribution with ``a = 0.01`` and
    ``b = 0.05`` over the interval ``[0.05, 1]``, and random seed 1:
    >>> hl.set_global_seed(1)
    >>> bn_ds = hl.balding_nichols_model(4, 40, 150, 3,
    ...          pop_dist=[0.1, 0.2, 0.3, 0.4],
    ...          fst=[.02, .06, .04, .12],
    ...          af_dist=hl.rand_beta(a=0.01, b=2.0, lower=0.05, upper=1.0))
    To guarantee reproducibility, we set the Hail global seed with
    :func:`.set_global_seed` immediately prior to generating the dataset.
    Notes
    -----
    This method simulates a matrix table of variants, samples, and genotypes
    using the Balding-Nichols model, which we now define.
    - :math:`K` populations are labeled by integers :math:`0, 1, \dots, K - 1`.
    - :math:`N` samples are labeled by integers :math:`0, 1, \dots, N - 1`.
    - :math:`M` variants are defined as ``1:1:A:C``, ``1:2:A:C``, ...,
      ``1:M:A:C``.
    - The default distribution for population assignment :math:`\pi` is uniform.
    - The default ancestral frequency distribution :math:`P_0` is uniform on
      :math:`[0.1, 0.9]`.
    - The default :math:`F_{ST}` values are all :math:`0.1`.
    The Balding-Nichols model models genotypes of individuals from a structured
    population comprising :math:`K` homogeneous modern populations that have
    each diverged from a single ancestral population (a `star phylogeny`). Each
    sample is assigned a population by sampling from the categorical
    distribution :math:`\pi`. Note that the actual size of each population is
    random.
    Variants are modeled as biallelic and unlinked. Ancestral allele
    frequencies are drawn independently for each variant from a frequency
    spectrum :math:`P_0`. The extent of genetic drift of each modern population
    from the ancestral population is defined by the corresponding :math:`F_{ST}`
    parameter :math:`F_k` (here and below, lowercase indices run over a range
    bounded by the corresponding uppercase parameter, e.g. :math:`k = 1, \ldots,
    K`). For each variant and population, allele frequencies are drawn from a
    `beta distribution <https://en.wikipedia.org/wiki/Beta_distribution>`__
    whose parameters are determined by the ancestral allele frequency and
    :math:`F_{ST}` parameter. The beta distribution gives a continuous
    approximation of the effect of genetic drift. We denote sample population
    assignments by :math:`k_n`, ancestral allele frequencies by :math:`p_m`,
    population allele frequencies by :math:`p_{k, m}`, and diploid, unphased
    genotype calls by :math:`g_{n, m}` (0, 1, and 2 correspond to homozygous
    reference, heterozygous, and homozygous variant, respectively).
    The generative model is then given by:
    .. math::
        \begin{aligned}
            k_n \,&\sim\, \pi \\
            p_m \,&\sim\, P_0 \\
            p_{k,m} \mid p_m\,&\sim\, \mathrm{Beta}(\mu = p_m,\, \sigma^2 = F_k p_m (1 - p_m)) \\
            g_{n,m} \mid k_n, p_{k, m} \,&\sim\, \mathrm{Binomial}(2, p_{k_n, m})
        \end{aligned}
    The beta distribution is parameterized by its mean and variance above; the
    usual parameters are :math:`a = p \frac{1 - F}{F}` and
    :math:`b = (1 - p) \frac{1 - F}{F}` with :math:`F = F_k` and :math:`p = p_m`.
    The resulting dataset has the following fields.
    Global fields:
    - `bn.n_populations` (:py:data:`.tint32`) -- Number of populations.
    - `bn.n_samples` (:py:data:`.tint32`) -- Number of samples.
    - `bn.n_variants` (:py:data:`.tint32`) -- Number of variants.
    - `bn.n_partitions` (:py:data:`.tint32`) -- Number of partitions.
    - `bn.pop_dist` (:class:`.tarray` of :py:data:`.tfloat64`) -- Population distribution indexed by
      population.
    - `bn.fst` (:class:`.tarray` of :py:data:`.tfloat64`) -- :math:`F_{ST}` values indexed by
      population.
    - `bn.mixture` (:py:data:`.tbool`) -- Value of `mixture` parameter.
    Row fields:
    - `locus` (:class:`.tlocus`) -- Variant locus (key field).
    - `alleles` (:class:`.tarray` of :py:data:`.tstr`) -- Variant alleles (key field).
    - `ancestral_af` (:py:data:`.tfloat64`) -- Ancestral allele frequency.
    - `af` (:class:`.tarray` of :py:data:`.tfloat64`) -- Modern allele frequencies indexed by
      population.
    Column fields:
    - `sample_idx` (:py:data:`.tint32`) -- Sample index (key field).
    - `pop` (:py:data:`.tint32`) -- Population of sample.
    Entry fields:
    - `GT` (:py:data:`.tcall`) -- Genotype call (diploid, unphased).
    For the `Pritchard-Stephens-Donnelly model <http://www.genetics.org/content/155/2/945.long>`__,
    set the `mixture` to true to treat `pop_dist` as the parameters of the
    Dirichlet distribution describing admixture between the modern populations.
    In this case, the type of `pop` is :class:`.tarray` of
    :py:data:`.tfloat64` and the value is the mixture proportions.
    Parameters
    ----------
    n_populations : :obj:`int`
        Number of modern populations.
    n_samples : :obj:`int`
        Total number of samples.
    n_variants : :obj:`int`
        Number of variants.
    n_partitions : :obj:`int`, optional
        Number of partitions.
        Default is 1 partition per million entries or 8, whichever is larger.
    pop_dist : :obj:`list` of :obj:`float`, optional
        Unnormalized population distribution, a list of length
        `n_populations` with non-negative values.
        Default is ``[1, ..., 1]``.
    fst : :obj:`list` of :obj:`float`, optional
        :math:`F_{ST}` values, a list of length `n_populations` with values
        in (0, 1). Default is ``[0.1, ..., 0.1]``.
    af_dist : :class:`.Float64Expression`, optional
        Representing a random function. Ancestral allele frequency
        distribution. Default is :func:`.rand_unif` over the range
        `[0.1, 0.9]` with seed 0.
    reference_genome : :class:`str` or :class:`.ReferenceGenome`
        Reference genome to use.
    mixture : :obj:`bool`
        Treat `pop_dist` as the parameters of a Dirichlet distribution,
        as in the Pritchard-Stephens-Donnelly model.
    Returns
    -------
    :class:`.MatrixTable`
        Simulated matrix table of variants, samples, and genotypes.
    """
    # Fill in model defaults: uniform population assignment, F_ST = 0.1 per
    # population, ancestral frequencies ~ Uniform[0.1, 0.9], and roughly one
    # partition per 128M entries (but at least 8).
    if pop_dist is None:
        pop_dist = [1 for _ in range(n_populations)]
    if fst is None:
        fst = [0.1 for _ in range(n_populations)]
    if af_dist is None:
        af_dist = hl.rand_unif(0.1, 0.9, seed=0)
    if n_partitions is None:
        n_partitions = max(8, int(n_samples * n_variants / (128 * 1024 * 1024)))
    # verify args
    for name, var in {"populations": n_populations,
                      "samples": n_samples,
                      "variants": n_variants,
                      "partitions": n_partitions}.items():
        if var < 1:
            raise ValueError("n_{} must be positive, got {}".format(name, var))
    for name, var in {"pop_dist": pop_dist, "fst": fst}.items():
        if len(var) != n_populations:
            raise ValueError("{} must be of length n_populations={}, got length {}"
                             .format(name, n_populations, len(var)))
    if any(x < 0 for x in pop_dist):
        raise ValueError("pop_dist must be non-negative, got {}"
                         .format(pop_dist))
    if any(x <= 0 or x >= 1 for x in fst):
        raise ValueError("elements of fst must satisfy 0 < x < 1, got {}"
                         .format(fst))
    # verify af_dist: it must be a scalar (unindexed) random float64 expression
    if not af_dist._is_scalar:
        raise ExpressionException('balding_nichols_model expects af_dist to '
                                  + 'have scalar arguments: found expression '
                                  + 'from source {}'
                                  .format(af_dist._indices.source))
    if af_dist.dtype != tfloat64:
        raise ValueError("af_dist must be a hail function with return type tfloat64.")
    info("balding_nichols_model: generating genotypes for {} populations, {} samples, and {} variants..."
         .format(n_populations, n_samples, n_variants))
    # generate matrix table and record the model parameters as globals
    bn = hl.utils.range_matrix_table(n_variants, n_samples, n_partitions)
    bn = bn.annotate_globals(
        bn=hl.struct(n_populations=n_populations,
                     n_samples=n_samples,
                     n_variants=n_variants,
                     n_partitions=n_partitions,
                     pop_dist=pop_dist,
                     fst=fst,
                     mixture=mixture))
    # col info: under the PSD (mixture) model each sample gets Dirichlet
    # admixture proportions; otherwise a single categorical population label
    pop_f = hl.rand_dirichlet if mixture else hl.rand_cat
    bn = bn.key_cols_by(sample_idx=bn.col_idx)
    bn = bn.select_cols(pop=pop_f(pop_dist))
    # row info: variant m is placed at global position m with alleles A/C;
    # per-population frequencies are Beta(p * (1-F)/F, (1-p) * (1-F)/F),
    # whose mean is the ancestral frequency p
    bn = bn.key_rows_by(locus=hl.locus_from_global_position(bn.row_idx, reference_genome=reference_genome),
                        alleles=['A', 'C'])
    bn = bn.select_rows(ancestral_af=af_dist,
                        af=hl.bind(lambda ancestral:
                                   hl.array([(1 - x) / x for x in fst])
                                   .map(lambda x:
                                        hl.rand_beta(ancestral * x,
                                                     (1 - ancestral) * x)),
                                   af_dist))
    # entry info: genotype ~ Binomial(2, p), expressed as a categorical draw
    # over the probabilities of 0, 1, and 2 alternate alleles
    p = hl.sum(bn.pop * bn.af) if mixture else bn.af[bn.pop]
    idx = hl.rand_cat([(1 - p) ** 2, 2 * p * (1 - p), p ** 2])
    return bn.select_entries(GT=hl.unphased_diploid_gt_index_call(idx))
@typecheck(mt=MatrixTable, f=anytype)
def filter_alleles(mt: MatrixTable,
                   f: Callable) -> MatrixTable:
    """Filter alternate alleles.
    .. include:: ../_templates/req_tvariant.rst
    Examples
    --------
    Keep SNPs:
    >>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
    Keep alleles with AC > 0:
    >>> ds_result = hl.filter_alleles(ds, lambda a, allele_index: ds.info.AC[allele_index - 1] > 0)
    Update the AC field of the resulting dataset:
    >>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
    >>> ds_result = ds_result.annotate_rows(info = updated_info)
    Notes
    -----
    The following new fields are generated:
    - `old_locus` (``locus``) -- The old locus, before filtering and computing
      the minimal representation.
    - `old_alleles` (``array<str>``) -- The old alleles, before filtering and
      computing the minimal representation.
    - `old_to_new` (``array<int32>``) -- An array that maps old allele index to
      new allele index. Its length is the same as `old_alleles`. Alleles that
      are filtered are missing.
    - `new_to_old` (``array<int32>``) -- An array that maps new allele index to
      the old allele index. Its length is the same as the modified `alleles`
      field.
    If all alternate alleles of a variant are filtered out, the variant itself
    is filtered out.
    **Using** `f`
    The `f` argument is a function or lambda evaluated per alternate allele to
    determine whether that allele is kept. If `f` evaluates to ``True``, the
    allele is kept. If `f` evaluates to ``False`` or missing, the allele is
    removed.
    `f` is a function that takes two arguments: the allele string (of type
    :class:`.StringExpression`) and the allele index (of type
    :class:`.Int32Expression`), and returns a boolean expression. This can
    be either a defined function or a lambda. For example, these two usages
    are equivalent:
    (with a lambda)
    >>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
    (with a defined function)
    >>> def filter_f(allele, allele_index):
    ...    return hl.is_snp(ds.alleles[0], allele)
    >>> ds_result = hl.filter_alleles(ds, filter_f)
    Warning
    -------
    :func:`.filter_alleles` does not update any fields other than `locus` and
    `alleles`. This means that row fields like allele count (AC) and entry
    fields like allele depth (AD) can become meaningless unless they are also
    updated. You can update them with :meth:`.annotate_rows` and
    :meth:`.annotate_entries`.
    See Also
    --------
    :func:`.filter_alleles_hts`
    Parameters
    ----------
    mt : :class:`.MatrixTable`
        Dataset.
    f : callable
        Function from (allele: :class:`.StringExpression`, allele_index:
        :class:`.Int32Expression`) to :class:`.BooleanExpression`
    Returns
    -------
    :class:`.MatrixTable`
    """
    require_row_key_variant(mt, 'filter_alleles')
    # Per-allele keep mask: the reference allele (index 0) is always kept;
    # an alternate allele is kept only when `f(allele, index)` is True.
    inclusion = hl.range(0, hl.len(mt.alleles)).map(lambda i: (i == 0) | hl.bind(lambda ii: f(mt.alleles[ii], ii), i))
    # old locus, old alleles, new to old, old to new
    mt = mt.annotate_rows(__allele_inclusion=inclusion,
                          old_locus=mt.locus,
                          old_alleles=mt.alleles)
    # Old indices of the kept alleles, in original order: new index -> old index.
    new_to_old = (hl.enumerate(mt.__allele_inclusion)
                  .filter(lambda elt: elt[1])
                  .map(lambda elt: elt[0]))
    # Dict from kept allele string to its new index; looking up every old
    # allele then yields old index -> new index, missing for filtered alleles.
    old_to_new_dict = (hl.dict(hl.enumerate(hl.enumerate(mt.alleles)
                                            .filter(lambda elt: mt.__allele_inclusion[elt[0]]))
                               .map(lambda elt: (elt[1][1], elt[0]))))
    old_to_new = hl.bind(lambda d: mt.alleles.map(lambda a: d.get(a)), old_to_new_dict)
    mt = mt.annotate_rows(old_to_new=old_to_new, new_to_old=new_to_old)
    # Minimal representation of the surviving alleles; this may change the
    # locus as well as the allele strings.
    new_locus_alleles = hl.min_rep(mt.locus, mt.new_to_old.map(lambda i: mt.alleles[i]))
    mt = mt.annotate_rows(__new_locus=new_locus_alleles.locus, __new_alleles=new_locus_alleles.alleles)
    # Drop variants whose alternate alleles were all filtered out.
    mt = mt.filter_rows(hl.len(mt.__new_alleles) > 1)
    # Split rows into those whose key is unchanged (`left`) and those whose
    # key changed (`right`); re-key the changed rows, then union the halves.
    left = mt.filter_rows((mt.locus == mt.__new_locus) & (mt.alleles == mt.__new_alleles))
    right = mt.filter_rows((mt.locus != mt.__new_locus) | (mt.alleles != mt.__new_alleles))
    right = right.key_rows_by(locus=right.__new_locus, alleles=right.__new_alleles)
    return left.union_rows(right, _check_cols=False).drop('__allele_inclusion', '__new_locus', '__new_alleles')
@typecheck(mt=MatrixTable, f=anytype, subset=bool)
def filter_alleles_hts(mt: MatrixTable,
                       f: Callable,
                       subset: bool = False) -> MatrixTable:
    """Filter alternate alleles and update standard GATK entry fields.
    Examples
    --------
    Filter to SNP alleles using the subset strategy:
    >>> ds_result = hl.filter_alleles_hts(
    ...     ds,
    ...     lambda allele, _: hl.is_snp(ds.alleles[0], allele),
    ...     subset=True)
    Update the AC field of the resulting dataset:
    >>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
    >>> ds_result = ds_result.annotate_rows(info = updated_info)
    Notes
    -----
    For usage of the `f` argument, see the :func:`.filter_alleles`
    documentation.
    :func:`.filter_alleles_hts` requires the dataset have the GATK VCF schema,
    namely the following entry fields in this order:
    .. code-block:: text
        GT: call
        AD: array<int32>
        DP: int32
        GQ: int32
        PL: array<int32>
    Use :meth:`.MatrixTable.select_entries` to rearrange these fields if
    necessary.
    The following new fields are generated:
    - `old_locus` (``locus``) -- The old locus, before filtering and computing
      the minimal representation.
    - `old_alleles` (``array<str>``) -- The old alleles, before filtering and
      computing the minimal representation.
    - `old_to_new` (``array<int32>``) -- An array that maps old allele index to
      new allele index. Its length is the same as `old_alleles`. Alleles that
      are filtered are missing.
    - `new_to_old` (``array<int32>``) -- An array that maps new allele index to
      the old allele index. Its length is the same as the modified `alleles`
      field.
    **Downcode algorithm**
    We will illustrate the behavior on the example genotype below
    when filtering the first alternate allele (allele 1) at a site
    with 1 reference allele and 2 alternate alleles.
    .. code-block:: text
        GT: 1/2
        GQ: 10
        AD: 0,50,35
        0 | 1000
        1 | 1000   10
        2 | 1000   0     20
          +-----------------
             0     1     2
    The downcode algorithm recodes occurrences of filtered alleles
    to occurrences of the reference allele (e.g. 1 -> 0 in our
    example). So the depths of filtered alleles in the AD field
    are added to the depth of the reference allele. Where
    downcoding filtered alleles merges distinct genotypes, the
    minimum PL is used (since PL is on a log scale, this roughly
    corresponds to adding probabilities). The PLs are then
    re-normalized (shifted) so that the most likely genotype has a
    PL of 0, and GT is set to this genotype. If an allele is
    filtered, this algorithm acts similarly to
    :func:`.split_multi_hts`.
    The downcode algorithm would produce the following:
    .. code-block:: text
        GT: 0/1
        GQ: 10
        AD: 35,50
        0 | 20
        1 | 0    10
          +-----------
            0    1
    In summary:
    - GT: Downcode filtered alleles to reference.
    - AD: Columns of filtered alleles are eliminated and their
      values are added to the reference column, e.g., filtering
      alleles 1 and 2 transforms ``25,5,10,20`` to ``40,20``.
    - DP: No change.
    - PL: Downcode filtered alleles to reference, combine PLs
      using minimum for each overloaded genotype, and shift so
      the overall minimum PL is 0.
    - GQ: The second-lowest PL (after shifting).
    **Subset algorithm**
    We will illustrate the behavior on the example genotype below
    when filtering the first alternate allele (allele 1) at a site
    with 1 reference allele and 2 alternate alleles.
    .. code-block:: text
        GT: 1/2
        GQ: 10
        AD: 0,50,35
        0 | 1000
        1 | 1000   10
        2 | 1000   0     20
          +-----------------
             0     1     2
    The subset algorithm subsets the AD and PL arrays
    (i.e. removes entries corresponding to filtered alleles) and
    then sets GT to the genotype with the minimum PL. Note that
    if the genotype changes (as in the example), the PLs are
    re-normalized (shifted) so that the most likely genotype has a
    PL of 0. Qualitatively, subsetting corresponds to the belief
    that the filtered alleles are not real so we should discard
    any probability mass associated with them.
    The subset algorithm would produce the following:
    .. code-block:: text
        GT: 1/1
        GQ: 980
        AD: 0,50
        0 | 980
        1 | 980    0
          +-----------
             0      1
    In summary:
    - GT: Set to most likely genotype based on the PLs ignoring
      the filtered allele(s).
    - AD: The filtered alleles' columns are eliminated, e.g.,
      filtering alleles 1 and 2 transforms ``25,5,10,20`` to
      ``25,20``.
    - DP: Unchanged.
    - PL: Columns involving filtered alleles are eliminated and
      the remaining columns' values are shifted so the minimum
      value is 0.
    - GQ: The second-lowest PL (after shifting).
    Warning
    -------
    :func:`.filter_alleles_hts` does not update any row fields other than
    `locus` and `alleles`. This means that row fields like allele count (AC) can
    become meaningless unless they are also updated. You can update them with
    :meth:`.annotate_rows`.
    See Also
    --------
    :func:`.filter_alleles`
    Parameters
    ----------
    mt : :class:`.MatrixTable`
    f : callable
        Function from (allele: :class:`.StringExpression`, allele_index:
        :class:`.Int32Expression`) to :class:`.BooleanExpression`
    subset : :obj:`.bool`
        Subset PL field if ``True``, otherwise downcode PL field. The
        calculation of GT and GQ also depend on whether one subsets or
        downcodes the PL.
    Returns
    -------
    :class:`.MatrixTable`
    """
    # Guard: this method rewrites GT/AD/GQ/PL in lockstep, so the entry
    # schema must be exactly the HTS (GATK VCF) schema.
    if mt.entry.dtype != hl.hts_entry_schema:
        raise FatalError("'filter_alleles_hts': entry schema must be the HTS entry schema:\n"
                         "  found: {}\n"
                         "  expected: {}\n"
                         "  Use 'hl.filter_alleles' to split entries with non-HTS entry fields.".format(
                             mt.entry.dtype, hl.hts_entry_schema))
    # Delegate allele filtering and index remapping (new_to_old/old_to_new)
    # to filter_alleles, then repair the entry fields below.
    mt = filter_alleles(mt, f)
    if subset:
        # Subset: for each new diploid genotype, fetch the PL of the
        # corresponding old genotype (via new_to_old), then shift so the
        # minimum PL is 0.
        newPL = hl.if_else(
            hl.is_defined(mt.PL),
            hl.bind(
                lambda unnorm: unnorm - hl.min(unnorm),
                hl.range(0, hl.triangle(mt.alleles.length())).map(
                    lambda newi: hl.bind(
                        lambda newc: mt.PL[hl.call(mt.new_to_old[newc[0]],
                                                   mt.new_to_old[newc[1]]).unphased_diploid_gt_index()],
                        hl.unphased_diploid_gt_index_call(newi)))),
            hl.missing(tarray(tint32)))
        return mt.annotate_entries(
            # GT becomes the (unique) genotype with minimum subset PL.
            GT=hl.unphased_diploid_gt_index_call(hl.argmin(newPL, unique=True)),
            # AD keeps only the kept alleles' columns.
            AD=hl.if_else(
                hl.is_defined(mt.AD),
                hl.range(0, mt.alleles.length()).map(
                    lambda newi: mt.AD[mt.new_to_old[newi]]),
                hl.missing(tarray(tint32))),
            # DP unchanged
            GQ=hl.gq_from_pl(newPL),
            PL=newPL)
    # otherwise downcode
    else:
        # Downcode: recode filtered alleles as reference by replacing missing
        # old_to_new entries with 0.
        mt = mt.annotate_rows(__old_to_new_no_na=mt.old_to_new.map(lambda x: hl.or_else(x, 0)))
        # For each new genotype, take the minimum PL over all old genotypes
        # that collapse onto it under the downcoding map.
        newPL = hl.if_else(
            hl.is_defined(mt.PL),
            (hl.range(0, hl.triangle(hl.len(mt.alleles)))
             .map(lambda newi: hl.min(hl.range(0, hl.triangle(hl.len(mt.old_alleles)))
                                      .filter(lambda oldi: hl.bind(
                                          lambda oldc: hl.call(mt.__old_to_new_no_na[oldc[0]],
                                                               mt.__old_to_new_no_na[oldc[1]]) == hl.unphased_diploid_gt_index_call(newi),
                                          hl.unphased_diploid_gt_index_call(oldi)))
                                      .map(lambda oldi: mt.PL[oldi])))),
            hl.missing(tarray(tint32)))
        return mt.annotate_entries(
            # GT: downcode both call alleles through the map.
            GT=hl.call(mt.__old_to_new_no_na[mt.GT[0]],
                       mt.__old_to_new_no_na[mt.GT[1]]),
            # AD: sum old depths over the old alleles collapsing to each new one.
            AD=hl.if_else(
                hl.is_defined(mt.AD),
                (hl.range(0, hl.len(mt.alleles))
                 .map(lambda newi: hl.sum(hl.range(0, hl.len(mt.old_alleles))
                                          .filter(lambda oldi: mt.__old_to_new_no_na[oldi] == newi)
                                          .map(lambda oldi: mt.AD[oldi])))),
                hl.missing(tarray(tint32))),
            # DP unchanged
            GQ=hl.gq_from_pl(newPL),
            PL=newPL).drop('__old_to_new_no_na')
@typecheck(mt=MatrixTable,
           call_field=str,
           r2=numeric,
           bp_window_size=int,
           memory_per_core=int)
def _local_ld_prune(mt, call_field, r2=0.2, bp_window_size=1000000, memory_per_core=256):
    """Run the local (per-partition) LD-pruning stage.

    Sizes a per-core variant queue from the per-core memory budget, then
    applies the native 'LocalLDPrune' matrix-to-table pass.
    """
    budget_bytes = memory_per_core * 1024 * 1024
    usable_fraction = 0.25  # fraction of the budget the queue may occupy
    per_variant_overhead = 50  # bookkeeping bytes per queued variant
    calls_per_pack = 32  # genotype calls packed per machine word
    sample_count = mt.count_cols()
    # Smallest budget that admits even one queued variant.
    floor_bytes = math.ceil((1 / usable_fraction) * 8 * sample_count + per_variant_overhead)
    if budget_bytes < floor_bytes:
        raise ValueError("memory_per_core must be greater than {} MB".format(floor_bytes // (1024 * 1024)))
    variant_bytes = math.ceil(8 * sample_count / calls_per_pack) + per_variant_overhead
    usable_bytes = budget_bytes * usable_fraction
    max_queue_size = int(max(1.0, math.ceil(usable_bytes / variant_bytes)))
    info(f'ld_prune: running local pruning stage with max queue size of {max_queue_size} variants')
    return Table(ir.MatrixToTableApply(mt._mir, {
        'name': 'LocalLDPrune',
        'callField': call_field,
        'r2Threshold': float(r2),
        'windowSize': bp_window_size,
        'maxQueueSize': max_queue_size
    }))
@typecheck(call_expr=expr_call,
           r2=numeric,
           bp_window_size=int,
           memory_per_core=int,
           keep_higher_maf=bool,
           block_size=nullable(int))
def ld_prune(call_expr, r2=0.2, bp_window_size=1000000, memory_per_core=256, keep_higher_maf=True, block_size=None):
    """Returns a maximal subset of variants that are nearly uncorrelated within each window.
    .. include:: ../_templates/req_diploid_gt.rst
    .. include:: ../_templates/req_biallelic.rst
    .. include:: ../_templates/req_tvariant.rst
    Examples
    --------
    Prune variants in linkage disequilibrium by filtering a dataset to those variants returned
    by :func:`.ld_prune`. If the dataset contains multiallelic variants, the multiallelic variants
    must be filtered out or split before being passed to :func:`.ld_prune`.
    >>> biallelic_dataset = dataset.filter_rows(hl.len(dataset.alleles) == 2)
    >>> pruned_variant_table = hl.ld_prune(biallelic_dataset.GT, r2=0.2, bp_window_size=500000)
    >>> filtered_ds = dataset.filter_rows(hl.is_defined(pruned_variant_table[dataset.row_key]))
    Notes
    -----
    This method finds a maximal subset of variants such that the squared Pearson
    correlation coefficient :math:`r^2` of any pair at most `bp_window_size`
    base pairs apart is strictly less than `r2`. Each variant is represented as
    a vector over samples with elements given by the (mean-imputed) number of
    alternate alleles. In particular, even if present, **phase information is
    ignored**. Variants that do not vary across samples are dropped.
    The method prunes variants in linkage disequilibrium in three stages.
    - The first, "local pruning" stage prunes correlated variants within each
      partition, using a local variant queue whose size is determined by
      `memory_per_core`. A larger queue may facilitate more local pruning in
      this stage. Minor allele frequency is not taken into account. The
      parallelism is the number of matrix table partitions.
    - The second, "global correlation" stage uses block-sparse matrix
      multiplication to compute correlation between each pair of remaining
      variants within `bp_window_size` base pairs, and then forms a graph of
      correlated variants. The parallelism of writing the locally-pruned matrix
      table as a block matrix is ``n_locally_pruned_variants / block_size``.
    - The third, "global pruning" stage applies :func:`.maximal_independent_set`
      to prune variants from this graph until no edges remain. This algorithm
      iteratively removes the variant with the highest vertex degree. If
      `keep_higher_maf` is true, then in the case of a tie for highest degree,
      the variant with lowest minor allele frequency is removed.
    Warning
    -------
    The locally-pruned matrix table and block matrix are stored as temporary files
    on persistent disk. See the warnings on `BlockMatrix.from_entry_expr` with
    regard to memory and Hadoop replication errors.
    Parameters
    ----------
    call_expr : :class:`.CallExpression`
        Entry-indexed call expression on a matrix table with row-indexed
        variants and column-indexed samples.
    r2 : :obj:`float`
        Squared correlation threshold (exclusive upper bound).
        Must be in the range [0.0, 1.0].
    bp_window_size: :obj:`int`
        Window size in base pairs (inclusive upper bound).
    memory_per_core : :obj:`int`
        Memory in MB per core for local pruning queue.
    keep_higher_maf: :obj:`bool`
        If ``True``, break ties at each step of the global pruning stage by
        preferring to keep variants with higher minor allele frequency.
    block_size: :obj:`int`, optional
        Block size for block matrices in the second stage.
        Default given by :meth:`.BlockMatrix.default_block_size`.
    Returns
    -------
    :class:`.Table`
        Table of a maximal independent set of variants.
    """
    # Validate arguments.
    if block_size is None:
        block_size = BlockMatrix.default_block_size()
    if not 0.0 <= r2 <= 1:
        raise ValueError(f'r2 must be in the range [0.0, 1.0], found {r2}')
    if bp_window_size < 0:
        raise ValueError(f'bp_window_size must be non-negative, found {bp_window_size}')
    check_entry_indexed('ld_prune/call_expr', call_expr)
    mt = matrix_table_source('ld_prune/call_expr', call_expr)
    require_row_key_variant(mt, 'ld_prune')
    # FIXME: remove once select_entries on a field is free
    if call_expr in mt._fields_inverse:
        field = mt._fields_inverse[call_expr]
    else:
        field = Env.get_uid()
        mt = mt.select_entries(**{field: call_expr})
    mt = mt.select_rows().select_cols()
    mt = mt.distinct_by_row()
    # Stage 1: local (per-partition) pruning, written to a temporary table.
    locally_pruned_table_path = new_temp_file()
    (_local_ld_prune(require_biallelic(mt, 'ld_prune'), field, r2, bp_window_size, memory_per_core)
        .write(locally_pruned_table_path, overwrite=True))
    locally_pruned_table = hl.read_table(locally_pruned_table_path).add_index()
    # Keep only locally-pruned variants; the joined `info` carries per-variant
    # statistics (e.g. mean, centered_length_rec) from the local stage.
    mt = mt.annotate_rows(info=locally_pruned_table[mt.row_key])
    mt = mt.filter_rows(hl.is_defined(mt.info)).unfilter_entries()
    # Stage 2: standardized genotype block matrix; its Gramian squared gives
    # r^2 between every remaining pair of variants.
    std_gt_bm = BlockMatrix.from_entry_expr(
        hl.or_else(
            (mt[field].n_alt_alleles() - mt.info.mean) * mt.info.centered_length_rec,
            0.0),
        block_size=block_size)
    r2_bm = (std_gt_bm @ std_gt_bm.T) ** 2
    # Restrict to the upper-triangular entries within each locus window that
    # meet the r^2 threshold; these are the edges of the correlation graph.
    _, stops = hl.linalg.utils.locus_windows(locally_pruned_table.locus, bp_window_size)
    entries = r2_bm.sparsify_row_intervals(range(stops.size), stops, blocks_only=True).entries(keyed=False)
    entries = entries.filter((entries.entry >= r2) & (entries.i < entries.j))
    entries = entries.select(i=hl.int32(entries.i), j=hl.int32(entries.j))
    if keep_higher_maf:
        fields = ['mean', 'locus']
    else:
        fields = ['locus']
    # NOTE: this local `info` shadows the module-level logging helper within
    # the rest of this function.
    info = locally_pruned_table.aggregate(
        hl.agg.collect(locally_pruned_table.row.select('idx', *fields)), _localize=False)
    info = hl.sorted(info, key=lambda x: x.idx)
    entries = entries.annotate_globals(info=info)
    # Block sparsification is per-block, so re-filter exactly to pairs on the
    # same contig and within the base-pair window.
    entries = entries.filter(
        (entries.info[entries.i].locus.contig == entries.info[entries.j].locus.contig)
        & (entries.info[entries.j].locus.position - entries.info[entries.i].locus.position <= bp_window_size))
    # Stage 3: maximal independent set over the correlation graph; ties on
    # vertex degree are optionally broken by minor allele frequency.
    if keep_higher_maf:
        entries = entries.annotate(
            i=hl.struct(idx=entries.i,
                        twice_maf=hl.min(entries.info[entries.i].mean, 2.0 - entries.info[entries.i].mean)),
            j=hl.struct(idx=entries.j,
                        twice_maf=hl.min(entries.info[entries.j].mean, 2.0 - entries.info[entries.j].mean)))

        def tie_breaker(left, right):
            return hl.sign(right.twice_maf - left.twice_maf)
    else:
        tie_breaker = None
    variants_to_remove = hl.maximal_independent_set(
        entries.i, entries.j, keep=False, tie_breaker=tie_breaker, keyed=False)
    locally_pruned_table = locally_pruned_table.annotate_globals(
        variants_to_remove=variants_to_remove.aggregate(
            hl.agg.collect_as_set(variants_to_remove.node.idx), _localize=False))
    # Return the kept variants (those NOT in the removal set).
    return locally_pruned_table.filter(
        locally_pruned_table.variants_to_remove.contains(hl.int32(locally_pruned_table.idx)),
        keep=False
    ).select().persist()
def _warn_if_no_intercept(caller, covariates):
if all([e._indices.axes for e in covariates]):
warning(f'{caller}: model appears to have no intercept covariate.'
'\n To include an intercept, add 1.0 to the list of covariates.')
return True
return False
| mit |
haraldschilly/smc | src/smc_sagews/smc_sagews/sage_server.py | 3 | 69396 | #!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this. None of that functionality is actually
used in https://cloud.sagemath.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in Salvus; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2013 William Stein <wstein@gmail.com> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
import os, sys, time
# Lazily-populated reference to the pylab module; stays None until pylab is
# loaded, and is used for clearing the current pylab figure between cells.
pylab = None

# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256

# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000

# Hard cap on the total character output of a single cell; computation is
# terminated/truncated once the sum of the above exceeds this.
MAX_OUTPUT = 150000
# We import the notebook interact, which we will monkey patch below,
# first, since importing later causes trouble in sage>=5.6.
import sagenb.notebook.interact
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
def unicode8(s):
    """Best-effort conversion of s to a unicode string.

    Tries UTF-8 decoding first, then a plain unicode() coercion, and
    finally returns s unchanged if both fail.  See
    http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror
    for background on why this is done so defensively.
    """
    try:
        return unicode(s, 'utf8')
    except:
        pass
    try:
        return unicode(s)
    except:
        return s
# Log file lives next to this source file (".py" replaced by ".log").
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
# Process id of this server process, recorded once at import time.
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a timestamped message to LOGFILE.

    Each argument is converted via unicode8 and joined with spaces; the
    line is prefixed with this process's PID and a UTC timestamp with
    millisecond precision.  Failures are reported by recursively logging
    a note rather than raising, so log() never propagates an exception.
    """
    #print "logging to %s"%LOGFILE
    try:
        mesg = "%s (%s): %s\n" % (
            PID,
            datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
            ' '.join([unicode8(x) for x in args]))
        # Use a context manager so the file handle is always closed; the
        # previous version left it open until garbage collection.
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except:
        log("an error writing a log message (ignoring)")
# Determine the info object, if available.  There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
# NOTE(review): this assumes the SMC environment variable is set; a
# missing variable raises KeyError here -- confirm that is intended.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    INFO = json.loads(open(_info_path).read())
else:
    # Fall back to an empty info object outside the SMC environment.
    INFO = {}
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Return a UUID-v4-formatted string deterministically derived from
    the SHA-1 hash of data.

    Hex digits from the digest fill the 'x' slots of the template; each
    'y' slot takes 8 | (low 2 bits) of the next digit, matching the
    variant nibble of a random UUID.  Equal inputs always map to equal
    uuids.  (A CoffeeScript version of this function is in
    misc_node.coffee.)
    """
    digest = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    chars = iter(digest)
    out = []
    for c in template:
        if c == 'x':
            out.append(next(chars))
        elif c == 'y':
            # take 8 + low order 3 bits of hex number.
            out.append(hex((int(next(chars), 16) & 0x3) | 0x8)[-1])
        else:
            out.append(c)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """Wrapper around a TCP socket implementing a simple length-prefixed
    message protocol.

    Every message on the wire is a 4-byte big-endian length header
    followed by a one-character type tag ('j' for JSON, 'b' for blob)
    and the payload.
    """

    def __init__(self, conn):
        # conn must be a raw socket, never another ConnectionJSON.
        assert not isinstance(conn, ConnectionJSON)  # avoid common mistake -- conn is supposed to be from socket.socket...
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        # Prefix the payload with its length as an unsigned 32-bit
        # big-endian integer, then send header and payload together.
        length_header = struct.pack(">L", len(s))
        self._conn.send(length_header + s)

    def send_json(self, m):
        """Serialize m to JSON and send it as a 'j'-tagged message.
        Returns the length of the serialized JSON text."""
        m = json.dumps(m)
        # NOTE(review): truncate_text returns a (text, truncated) tuple,
        # so this logs the tuple's repr rather than just the text.
        log(u"sending message '", truncate_text(m, 256), u"'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send a binary blob as a 'b'-tagged message, keyed by the uuid
        derived from its SHA-1 (see uuidsha1).  Returns that uuid."""
        s = uuidsha1(blob)
        self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Read the named file and send its entire contents as a blob."""
        log("sending file '%s'"%filename)
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        return self.send_blob(data)

    def _recv(self, n):
        """Receive up to n bytes, retrying (at most 20 times) when recv
        is interrupted by a signal (errno 4 == EINTR); any other socket
        error propagates.  Raises EOFError if the retries are exhausted."""
        #print "_recv(%s)"%n
        for i in range(20): # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
            try:
                #print "blocking recv (i = %s), pid=%s"%(i, os.getpid())
                r = self._conn.recv(n)
                #log("n=%s; received: '%s' of len %s"%(n,r, len(r)))
                return r
            except socket.error as (errno, msg):
                #print "socket.error, msg=%s"%msg
                if errno != 4:
                    raise
        raise EOFError

    def recv(self):
        """Receive one complete message.

        Returns ('json', obj) or ('blob', data).  Raises EOFError when
        the connection closes mid-message and ValueError on an unknown
        type tag.
        """
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]   # big endian 32 bits
        # Keep reading until the full payload of n bytes has arrived.
        s = self._recv(n)
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception, msg:
                log("Unable to parse JSON '%s'"%s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'"%s[0])
TRUNCATE_MESG = "WARNING: Output truncated. Type 'smc?' to learn how to raise the output limit."
def truncate_text(s, max_size):
    """Cut s down to at most max_size characters.

    Returns a pair (text, truncated): text is s itself when it fits,
    otherwise the first max_size characters followed by "[...]";
    truncated reports whether any cutting happened.
    """
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
class Message(object):
    """Factory for the JSON message dicts exchanged with the client.

    Each method returns a plain dict with an 'event' key plus the
    event-specific properties; the dict is later serialized by
    ConnectionJSON.send_json.
    """

    def _new(self, event, props=None):
        """Build a message dict for event from props (a mapping, often
        locals()); the 'self' key is always dropped."""
        # Default is None rather than a mutable {} so the default object
        # can never be accidentally shared or mutated across calls.
        m = {'event': event}
        if props is not None:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(self, id,
               stdout       = None,
               stderr       = None,
               code         = None,
               html         = None,
               javascript   = None,
               coffeescript = None,
               interact     = None,
               md           = None,
               tex          = None,
               d3           = None,
               file         = None,
               raw_input    = None,
               obj          = None,
               done         = None,
               once         = None,
               hide         = None,
               show         = None,
               auto         = None,
               events       = None,
               clear        = None,
               delete_last  = None):
        """Build an 'output' message for cell id.

        Oversized text fields are truncated to the sage_server.MAX_*
        limits, and TRUNCATE_MESG is appended to stderr when *any*
        field was cut.  (The previous version overwrote did_truncate on
        every field, so an earlier truncation could be forgotten.)
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text
        did_truncate = False
        import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], truncated = t(code['source'], sage_server.MAX_CODE_SIZE)
            did_truncate = did_truncate or truncated
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], truncated = t(stderr, sage_server.MAX_STDERR_SIZE)
            did_truncate = did_truncate or truncated
        if stdout is not None and len(stdout) > 0:
            m['stdout'], truncated = t(stdout, sage_server.MAX_STDOUT_SIZE)
            did_truncate = did_truncate or truncated
        if html is not None and len(html) > 0:
            m['html'], truncated = t(html, sage_server.MAX_HTML_SIZE)
            did_truncate = did_truncate or truncated
        if md is not None and len(md) > 0:
            m['md'], truncated = t(md, sage_server.MAX_MD_SIZE)
            did_truncate = did_truncate or truncated
        if tex is not None and len(tex) > 0:
            tex['tex'], truncated = t(tex['tex'], sage_server.MAX_TEX_SIZE)
            did_truncate = did_truncate or truncated
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file    # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if auto is not None: m['auto'] = auto
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if did_truncate:
            # Tell the user how to raise the limits.
            if 'stderr' in m:
                m['stderr'] += '\n' + TRUNCATE_MESG
            else:
                m['stderr'] = '\n' + TRUNCATE_MESG
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
# Singleton used throughout this module to construct outgoing messages.
message = Message()

# Username of the account this server process runs as.
whoami = os.environ['USER']
def client1(port, hostname):
    """Simple interactive command-line client for manual testing.

    Connects to a sage_server at (hostname, port), starts a session,
    then enters a read-eval-print loop; Ctrl-C opens a second throwaway
    connection used to deliver an interrupt to the remote process.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    # The first reply describes the session, including the remote pid.
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print "PID = %s"%pid
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: '%id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # Drain output messages until this cell reports done.
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout']); sys.stdout.flush()
                    if 'stderr' in mesg:
                        print '! ' + '\n! '.join(mesg['stderr'].splitlines())
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            # Interrupt via a separate connection so the main message
            # stream is not disturbed.
            print "Sending interrupt signal"
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print "\nExiting Sage client."
class BufferedOutputStream(object):
    """File-like object that batches writes and hands them to a callback.

    Output accumulates in an internal buffer and is delivered via
    f(text, done=...) once the buffer reaches flush_size characters or
    flush_interval seconds have elapsed since the last flush.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # Restart the flush-interval clock.
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        self._buf += output
        #self.flush()
        now = time.time()
        size_exceeded = len(self._buf) >= self._flush_size
        interval_elapsed = now - self._last_flush_time >= self._flush_interval
        if size_exceeded or interval_elapsed:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        # Skip only when there is nothing buffered AND no final
        # done-notification to deliver.
        if self._buf or done:
            self._f(self._buf, done=done)
            self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """Dict subclass supporting 'change' and 'del' event callbacks.

    Callbacks registered for a specific key fire with the new value
    ('change') or no arguments ('del'); callbacks registered under the
    key None fire for every key and additionally receive the key.
    Callback exceptions are printed, never propagated, so they cannot
    break assignment or deletion.
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register callback f for event ('change' or 'del') on key x;
        x=None registers for every key."""
        if event == 'change':
            self._on_change.setdefault(x, []).append(f)
        elif event == 'del':
            self._on_del.setdefault(x, []).append(f)

    def remove(self, event, x, f):
        """Unregister callback f; a no-op if f was never registered.

        (The previous implementation called list.find, which does not
        exist, so this method always raised AttributeError.)
        """
        if event == 'change' and x in self._on_change:
            v = self._on_change[x]
            try:
                v.remove(f)
            except ValueError:
                pass
            if len(v) == 0:
                del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            v = self._on_del[x]
            try:
                v.remove(f)
            except ValueError:
                pass
            if len(v) == 0:
                del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        try:
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            # Callbacks must never break assignment; just report.
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Like self[x] = y, but suppress the key-specific callbacks
        listed in do_not_trigger (wildcard callbacks still fire)."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """Lightweight record pairing a blob URL with its time-to-live.

    ttl is in seconds; a ttl of 0 means the object is permanently
    available.  str() yields the URL itself so the object can be used
    directly wherever a URL string is expected.
    """

    def __init__(self, url, ttl):
        self.url, self.ttl = url, ttl

    def __repr__(self):
        return repr(self.url)

    def __str__(self):
        return self.url
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special SageMathCloud functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
def __repr__(self):
return ''
def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
self._conn = conn
self._num_output_messages = 0
self._total_output_length = 0
self._output_warning_sent = False
self._id = id
self._done = True # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
self.data = data
self.cell_id = cell_id
self.namespace = namespace
self.message_queue = message_queue
self.code_decorators = [] # gets reset if there are code decorators
# Alias: someday remove all references to "salvus" and instead use smc.
# For now this alias is easier to think of and use.
namespace['smc'] = namespace['salvus'] = self # beware of circular ref?
# Monkey patch in our "require" command.
namespace['require'] = self.require
# Make the salvus object itself available when doing "from sage.all import *".
import sage.all
sage.all.salvus = self
def _send_output(self, *args, **kwds):
if self._output_warning_sent:
raise KeyboardInterrupt
mesg = message.output(*args, **kwds)
if not mesg.get('once',False):
self._num_output_messages += 1
import sage_server
if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
self._output_warning_sent = True
err = "\nToo many output messages (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..."%sage_server.MAX_OUTPUT_MESSAGES
self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
n = self._conn.send_json(mesg)
self._total_output_length += n
if self._total_output_length > sage_server.MAX_OUTPUT:
self._output_warning_sent = True
err = "\nOutput too long -- MAX_OUTPUT (=%s) exceed (type 'smc?' to learn how to raise this limit): attempting to terminate..."%sage_server.MAX_OUTPUT
self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
def link(self, filename, label=None, foreground=True, cls=''):
"""
Output a clickable link to a file somewhere in this project. The filename
path must be relative to the current working directory of the Python process.
The simplest way to use this is
salvus.link("../name/of/file") # any relative path to any file
This creates a link, which when clicked on, opens that file in the foreground.
If the filename is the name of a directory, clicking will instead
open the file browser on that directory:
salvus.link("../name/of/directory") # clicking on the resulting link opens a directory
If you would like a button instead of a link, pass cls='btn'. You can use any of
the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
If you would like to change the text in the link (or button) to something
besides the default (filename), just pass arbitrary HTML to the label= option.
INPUT:
- filename -- a relative path to a file or directory
- label -- (default: the filename) html label for the link
- foreground -- (default: True); if True, opens link in the foreground
- cls -- (default: '') optional CSS classes, such as 'btn'.
EXAMPLES:
Use as a line decorator::
%salvus.link name/of/file.foo
Make a button::
salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
Make two big blue buttons with plots in them::
plot(sin, 0, 20).save('sin.png')
plot(cos, 0, 20).save('cos.png')
for img in ['sin.png', 'cos.png']:
salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
"""
path = os.path.abspath(filename)[len(os.environ['HOME'])+1:]
if label is None:
label = filename
id = uuid()
self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>"%(cls, id))
s = "$('#%s').html(obj.label).click(function() {%s; return false;});"%(id, self._action(path, foreground))
self.javascript(s, obj={'label':label, 'path':path, 'foreground':foreground}, once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
action = "worksheet.project_page.chdir(obj.path);"
if foreground:
action += "worksheet.project_page.display_tab('project-file-listing');"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
"""
Open a new file (or directory) document in another tab.
See the documentation for salvus.link.
"""
path = os.path.abspath(filename)[len(os.environ['HOME'])+1:]
self.javascript(self._action(path, foreground),
obj = {'path':path, 'foreground':foreground}, once=True)
def close_tab(self, filename):
"""
Open an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.editor.close(obj)", obj = filename, once=True)
def threed(self,
g, # sage Graphic3d object.
width = None,
height = None,
frame = True, # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
# 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
background = None,
foreground = None,
spin = False,
aspect_ratio = None,
frame_aspect_ratio = None, # synonym for aspect_ratio
done = False,
renderer = None, # None, 'webgl', or 'canvas'
):
from graphics import graphics3d_to_jsonable, json_float as f
# process options, combining ones set explicitly above with ones inherited from 3d scene
opts = { 'width':width, 'height':height,
'background':background, 'foreground':foreground,
'spin':spin, 'aspect_ratio':aspect_ratio,
'renderer':renderer}
extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
# clean up and normalize aspect_ratio option
if aspect_ratio is None:
if frame_aspect_ratio is not None:
aspect_ratio = frame_aspect_ratio
elif 'frame_aspect_ratio' in extra_kwds:
aspect_ratio = extra_kwds['frame_aspect_ratio']
elif 'aspect_ratio' in extra_kwds:
aspect_ratio = extra_kwds['aspect_ratio']
if aspect_ratio is not None:
if aspect_ratio == 1 or aspect_ratio == "automatic":
aspect_ratio = None
elif not (isinstance(aspect_ratio, (list, tuple)) and len(aspect_ratio) == 3):
raise TypeError("aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"%(aspect_ratio,))
else:
aspect_ratio = [f(x) for x in aspect_ratio]
opts['aspect_ratio'] = aspect_ratio
for k in ['spin', 'height', 'width', 'background', 'foreground', 'renderer']:
if k in extra_kwds and not opts.get(k,None):
opts[k] = extra_kwds[k]
if not isinstance(opts['spin'], bool):
opts['spin'] = f(opts['spin'])
opts['width'] = f(opts['width'])
opts['height'] = f(opts['height'])
# determine the frame
b = g.bounding_box()
xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][1], b[0][2], b[1][2]
fr = opts['frame'] = {'xmin':f(xmin), 'xmax':f(xmax),
'ymin':f(ymin), 'ymax':f(ymax),
'zmin':f(zmin), 'zmax':f(zmax)}
if isinstance(frame, dict):
for k in fr.keys():
if k in frame:
fr[k] = f(frame[k])
fr['draw'] = frame.get('draw', True)
fr['color'] = frame.get('color', None)
fr['thickness'] = f(frame.get('thickness', None))
fr['labels'] = frame.get('labels', None)
if 'fontsize' in frame:
fr['fontsize'] = int(frame['fontsize'])
elif isinstance(frame, bool):
fr['draw'] = frame
# convert the Sage graphics object to a JSON object that can be rendered
scene = {'opts' : opts,
'obj' : graphics3d_to_jsonable(g)}
# Store that object in the database, rather than sending it directly as an output message.
# We do this since obj can easily be quite large/complicated, and managing it as part of the
# document is too slow and doesn't scale.
blob = json.dumps(scene, separators=(',', ':'))
uuid = self._conn.send_blob(blob)
# flush output (so any text appears before 3d graphics, in case they are interleaved)
self._flush_stdio()
# send message pointing to the 3d 'file', which will get downloaded from database
self._send_output(id=self._id, file={'filename':unicode8("%s.sage3d"%uuid), 'uuid':uuid}, done=done)
def d3_graph(self, g, **kwds):
from graphics import graph_to_d3_jsonable
self._send_output(id=self._id, d3={"viewer":"graph", "data":graph_to_d3_jsonable(g, **kwds)})
def file(self, filename, show=True, done=False, download=False, once=False, events=None, raw=False):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
is not possible, e.g, if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ[u'HOME'] + u'/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(u"can only send raw files in your home directory")
url = os.path.join(u'/',info['base_url'].strip('/'), info['project_id'], u'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id, once=once, file={'filename':filename, 'url':url, 'show':show}, events=events, done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get('sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s"%mesg['error'])
self._flush_stdio()
self._send_output(id=self._id, once=once, file={'filename':filename, 'uuid':file_uuid, 'show':show}, events=events, done=done)
if not show:
info = self.project_info()
url = u"%s/blobs/%s?uuid=%s"%(info['base_url'], filename, file_uuid)
if download:
url += u'?download'
return TemporaryURL(url=url, ttl=mesg.get('ttl',0))
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print "%s MB used"%int(get_memory_usage())')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
def execute(self, code, namespace=None, preparse=True, locals=None):
if namespace is None:
namespace = self.namespace
# clear pylab figure (takes a few microseconds)
if pylab is not None:
pylab.clf()
#code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print x"
blocks = sage_parsing.divide_into_blocks(code)
for start, stop, block in blocks:
if preparse:
block = sage_parsing.preparse_code(block)
sys.stdout.reset(); sys.stderr.reset()
try:
b = block.rstrip()
if b.endswith('??'):
p = sage_parsing.introspect(block,
namespace=namespace, preparse=False)
self.code(source = p['result'], mode = "python")
elif b.endswith('?'):
p = sage_parsing.introspect(block, namespace=namespace, preparse=False)
self.code(source = p['result'], mode = "text/x-rst")
else:
exec compile(block+'\n', '', 'single') in namespace, locals
sys.stdout.flush()
sys.stderr.flush()
except:
sys.stdout.flush()
sys.stderr.write('Error in lines %s-%s\n'%(start+1, stop+1))
traceback.print_exc()
sys.stderr.flush()
break
def execute_with_code_decorators(self, code_decorators, code, preparse=True, namespace=None, locals=None):
"""
salvus.execute_with_code_decorators is used when evaluating
code blocks that are set to any non-default code_decorator.
"""
import sage # used below as a code decorator
if isinstance(code_decorators, (str, unicode)):
code_decorators = [code_decorators]
if preparse:
code_decorators = map(sage_parsing.preparse_code, code_decorators)
code_decorators = [eval(code_decorator, self.namespace) for code_decorator in code_decorators]
# The code itself may want to know exactly what code decorators are in effect.
# For example, r.eval can do extra things when being used as a decorator.
self.code_decorators = code_decorators
for i, code_decorator in enumerate(code_decorators):
# eval is for backward compatibility
if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'before'):
code_decorators[i] = code_decorator.before(code)
for code_decorator in reversed(code_decorators):
if hasattr(code_decorator, 'eval'): # eval is for backward compatibility
print code_decorator.eval(code, locals=self.namespace),
code = ''
elif code_decorator is sage:
# special case -- the sage module (i.e., %sage) should do nothing.
pass
else:
code = code_decorator(code)
if code is None:
code = ''
if code != '' and isinstance(code, (str, unicode)):
self.execute(code, preparse=preparse, namespace=namespace, locals=locals)
for code_decorator in code_decorators:
if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'after'):
code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html), id=self._id, done=done, once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
def pdf(self, filename, **kwds):
sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
- obj -- latex string or object that is automatically be converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if isinstance(obj, str) else self.namespace['latex'](obj, **kwds)
self._send_output(tex={'tex':tex, 'display':display}, id=self._id, done=done, once=once)
return self
def start_executing(self):
self._send_output(done=False, id=self._id)
def clear(self, done=False):
self._send_output(clear=True, id=self._id, done=done)
def delete_last_output(self, done=False):
self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if isinstance(output, (str, unicode)) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if isinstance(output, (str, unicode)) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(self, source, # actual source code
mode = None, # the syntax highlight codemirror mode
filename = None, # path of file it is contained in (if applicable)
lineno = -1, # line number where source starts (0-based)
done=False, once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if isinstance(source, (str, unicode)) else unicode8(source)
code = {'source' : source,
'filename' : filename,
'lineno' : int(lineno),
'mode' : mode}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
    # Dispatch new values from the client to the interact callback
    # registered under this id.  Interact ids do not survive a restart
    # of the Sage process, so if the id is unknown we just tell the
    # user to re-evaluate the cell instead of raising.
    if id not in sage_salvus.interacts:
        print "(Evaluate this cell to use this interact.)"
        #raise RuntimeError, "Error: No interact with id %s"%id
    else:
        sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
    """Build an interact around f, send it to the cell, and return a handle."""
    cell = sage_salvus.InteractCell(f, **kwds)
    self._flush_stdio()
    self._send_output(id=self._id, interact=cell.jsonable(), done=done, once=once)
    return sage_salvus.InteractFunction(cell)
def javascript(self, code, once=False, coffeescript=False, done=False, obj=None):
    """
    Execute the given Javascript code as part of the output
    stream. This same code will be executed (at exactly this
    point in the output stream) every time the worksheet is
    rendered.
    See the docs for the top-level javascript function for more details.
    INPUT:
    - code -- a string
    - once -- boolean (default: False); if True the Javascript is
      only executed once, not every time the cell is loaded. This
      is what you would use if you call salvus.stdout, etc. Use
      once=False, e.g., if you are using javascript to make a DOM
      element draggable (say). WARNING: If once=True, then the
      javascript is likely to get executed before other output to
      a given cell is even rendered.
    - coffeescript -- boolean (default: False); if True, the input
      code is first converted from CoffeeScript to Javascript.
    At least the following Javascript objects are defined in the
    scope in which the code is evaluated::
    - cell -- jQuery wrapper around the current compute cell
    - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
      allow you to write additional output to the cell
    - worksheet - jQuery wrapper around the current worksheet DOM object
    - obj -- the optional obj argument, which is passed via JSON serialization
    """
    # obj defaults to a fresh dict per call (never a shared default).
    if obj is None:
        obj = {}
    self._send_output(javascript={'code':code, 'coffeescript':coffeescript}, id=self._id, done=done, obj=obj, once=once)
def coffeescript(self, *args, **kwds):
    """
    Exactly salvus.javascript, but with coffeescript=True forced on.
    See the docs for the top-level javascript function for more details.
    """
    kwds['coffeescript'] = True
    self.javascript(*args, **kwds)
def raw_input(self, prompt='', default='', placeholder='', input_width=None, label_width=None, done=False, type=None):  # done is ignored here
    """
    Show an input box in the output of this cell, block until the
    client submits a value, and return it.
    INPUT:
    - prompt, default, placeholder -- strings configuring the input box
    - input_width, label_width -- optional CSS width strings
    - type -- None (return the raw unicode value), the string 'sage'
      (evaluate the value with sage_eval), or a callable used to
      coerce the value (note: this parameter intentionally shadows the
      builtin ``type`` -- it is part of the public interface)
    """
    self._flush_stdio()
    m = {'prompt':unicode8(prompt)}
    if input_width is not None:
        m['input_width'] = unicode8(input_width)
    if label_width is not None:
        m['label_width'] = unicode8(label_width)
    if default:
        m['value'] = unicode8(default)
    if placeholder:
        m['placeholder'] = unicode8(placeholder)
    self._send_output(raw_input=m, id=self._id)
    # Block here until the client sends something back on the queue.
    typ, mesg = self.message_queue.next_mesg()
    if typ == 'json' and mesg['event'] == 'codemirror_sage_raw_input':
        # everything worked out perfectly: replace the live input box
        # with a submitted (read-only) copy of itself.
        self.delete_last_output()
        m['value'] = mesg['value'] # as unicode!
        m['submitted'] = True
        self._send_output(raw_input=m, id=self._id)
        value = mesg['value']
        if type is not None:
            if type == 'sage':
                value = sage_salvus.sage_eval(value)
            else:
                try:
                    value = type(value)
                except TypeError:
                    # Some things in Sage are clueless about unicode for some reason...
                    # Let's at least try, in case the unicode can convert to a string.
                    value = type(str(value))
        return value
    else:
        # any other message means the user did something else (e.g.
        # interrupted or ran another cell) before submitting.
        raise KeyboardInterrupt("raw_input interrupted by another action")
def _check_component(self, component):
    """Raise ValueError unless component is 'input' or 'output'."""
    if component != 'input' and component != 'output':
        raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
    """Hide the given component ('input' or 'output') of this cell."""
    self._check_component(component)
    self._send_output(self._id, hide=component)
def show(self, component):
    """Show the given component ('input' or 'output') of this cell."""
    self._check_component(component)
    self._send_output(self._id, show=component)
def auto(self, state=True):
    """
    Set whether or not the current cell is automatically executed
    when the Sage process restarts.
    """
    self._send_output(self._id, auto=state)
def notify(self, **kwds):
    """
    Display a graphical notification using the pnotify Javascript library.
    INPUTS:
    - `title: false` - The notice's title.
    - `title_escape: false` - Whether to escape the content of the title. (Not allow HTML.)
    - `text: false` - The notice's text.
    - `text_escape: false` - Whether to escape the content of the text. (Not allow HTML.)
    - `styling: "bootstrap"` - What styling classes to use. (Can be either jqueryui or bootstrap.)
    - `addclass: ""` - Additional classes to be added to the notice. (For custom styling.)
    - `cornerclass: ""` - Class to be added to the notice for corner styling.
    - `nonblock: false` - Create a non-blocking notice. It lets the user click elements underneath it.
    - `nonblock_opacity: .2` - The opacity of the notice (if it's non-blocking) when the mouse is over it.
    - `history: true` - Display a pull down menu to redisplay previous notices, and place the notice in the history.
    - `auto_display: true` - Display the notice when it is created. Turn this off to add notifications to the history without displaying them.
    - `width: "300px"` - Width of the notice.
    - `min_height: "16px"` - Minimum height of the notice. It will expand to fit content.
    - `type: "notice"` - Type of the notice. "notice", "info", "success", or "error".
    - `icon: true` - Set icon to true to use the default icon for the selected style/type, false for no icon, or a string for your own icon class.
    - `animation: "fade"` - The animation to use when displaying and hiding the notice. "none", "show", "fade", and "slide" are built in to jQuery. Others require jQuery UI. Use an object with effect_in and effect_out to use different effects.
    - `animate_speed: "slow"` - Speed at which the notice animates in and out. "slow", "def" or "normal", "fast" or number of milliseconds.
    - `opacity: 1` - Opacity of the notice.
    - `shadow: true` - Display a drop shadow.
    - `closer: true` - Provide a button for the user to manually close the notice.
    - `closer_hover: true` - Only show the closer button on hover.
    - `sticker: true` - Provide a button for the user to manually stick the notice.
    - `sticker_hover: true` - Only show the sticker button on hover.
    - `hide: true` - After a delay, remove the notice.
    - `delay: 8000` - Delay in milliseconds before the notice is removed.
    - `mouse_reset: true` - Reset the hide timer if the mouse moves over the notice.
    - `remove: true` - Remove the notice's elements from the DOM after it is removed.
    - `insert_brs: true` - Change new lines to br tags.
    """
    # JSON-ify every option so it survives the trip to the browser,
    # then hand the whole dict to $.pnotify via the javascript channel.
    obj = {}
    for k, v in kwds.iteritems():
        obj[k] = sage_salvus.jsonable(v)
    self.javascript("$.pnotify(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
    """
    Tell the browser to execute javascript.  Basically the same as
    salvus.javascript with once=True (the default), except this
    isn't tied to a particular cell.  There is a worksheet object
    defined in the scope of the evaluation.
    See the docs for the top-level javascript function for more details.
    """
    mesg = message.execute_javascript(
        code,
        coffeescript=coffeescript,
        obj=json.dumps(obj, separators=(',', ':')))
    self._conn.send_json(mesg)
def execute_coffeescript(self, *args, **kwds):
    """
    Exactly salvus.execute_javascript, but with coffeescript=True
    forced on.  See the docs for the top-level javascript function for
    more details.
    """
    kwds['coffeescript'] = True
    self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
    """
    Return module obtained by compiling the Cython code in the
    given file.
    INPUT:
    - filename -- name of a Cython file
    - all other options are passed to sage.misc.cython.cython unchanged,
      except for use_cache which defaults to True (instead of False)
    OUTPUT:
    - a module
    """
    if 'use_cache' not in opts:
        opts['use_cache'] = True
    import sage.misc.cython
    # cython() compiles the file and returns the module name plus the
    # directory the compiled extension was written into.
    modname, path = sage.misc.cython.cython(filename, **opts)
    import sys
    try:
        # make the build directory importable just long enough to import
        sys.path.insert(0,path)
        module = __import__(modname)
    finally:
        del sys.path[0]
    return module
def _import_code(self, content, **opts):
    """
    Write content to a uniquely named .py file in the current
    directory, import it as a module, then remove the temporary
    .py/.pyc files again.
    INPUT:
    - content -- string of Python source code
    OUTPUT:
    - the imported module
    """
    # Find a module name that is not already taken on disk.
    while True:
        py_file_base = uuid().replace('-','_')
        if not os.path.exists(py_file_base + '.py'):
            break
    try:
        # Use a context manager so the file is closed (and flushed)
        # before we try to import it.
        with open(py_file_base + '.py', 'w') as f:
            f.write(content)
        import sys
        try:
            sys.path.insert(0, os.path.abspath('.'))
            mod = __import__(py_file_base)
        finally:
            del sys.path[0]
    finally:
        # BUGFIX: guard each unlink -- unconditionally unlinking the
        # .pyc raised OSError (masking the real error) whenever the
        # write or import failed, since the .pyc only exists after a
        # successful import.
        for ext in ('.py', '.pyc'):
            p = py_file_base + ext
            if os.path.exists(p):
                os.unlink(p)
    return mod
def _sage(self, filename, **opts):
    """Preparse the given .sage file and import it as a module."""
    import sage.misc.preparser
    src = open(filename).read()
    content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(src)
    return self._import_code(content, **opts)
def _spy(self, filename, **opts):
    """Preparse the given .spy file (minimal Sage imports) and import it."""
    import sage.misc.preparser
    src = open(filename).read()
    content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(src)
    return self._import_code(content, **opts)
def _py(self, filename, **opts):
    """Import the given plain-Python module by name and return it."""
    return __import__(filename)
def require(self, filename, **opts):
    """
    Compile and/or import the given file and return the resulting
    module, dispatching on its extension: .pyx/.spyx (Cython),
    .sage, .spy, or .py.
    """
    if not os.path.exists(filename):
        raise ValueError("file '%s' must exist"%filename)
    base, ext = os.path.splitext(filename)
    if ext in ('.pyx', '.spyx'):
        return self._cython(filename, **opts)
    handlers = {'.sage': self._sage, '.spy': self._spy, '.py': self._py}
    if ext in handlers:
        return handlers[ext](filename, **opts)
    raise NotImplementedError("require file of type %s not implemented"%ext)
def typeset_mode(self, on=True):
    """Turn typeset (LaTeX-rendered) output mode on or off."""
    sage_salvus.typeset_mode(on)
def project_info(self):
    """
    Return a dictionary with information about the project in which
    this code is running (project_id, location, base_url, ...).
    """
    return INFO
# Copy docstrings from the sage_salvus implementations onto the Salvus
# methods, so worksheet introspection/help shows useful documentation.
# (__func__ unwraps the Python-2 unbound methods so __doc__ is writable.)
Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Execute one block of worksheet code: redirect stdout/stderr into
    the cell's output streams, run the (possibly mode-wrapped) code in
    the global namespace, and guarantee that exactly one 'done'
    message is sent at the end (unless salvus._done is False).
    """
    salvus = Salvus(conn=conn, id=id, data=data, message_queue=message_queue, cell_id=cell_id)
    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.salvus = salvus
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # wrap the code in the current mode's prefix/postfix (set e.g.
        # by %default_mode), unless the cell itself starts with a magic.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        # restore the real stdout/stderr for the next execution
        (sys.stdout, sys.stderr) = streams
def drop_privileges(id, home, transient, username):
    """
    Drop privileges in the forked child: switch to the given uid/gid
    and point all relevant environment variables at the target user's
    home directory.
    INPUT:
    - id -- numeric id, used for both the uid and the gid
    - home -- home directory of the target user
    - transient -- if True, chown the home directory to the new ids first
    - username -- value stored in $USER and $USERNAME
    """
    gid = id
    uid = id
    if transient:
        os.chown(home, uid, gid)
    # setgid must come before setuid: once we have dropped the uid we
    # no longer have permission to change the group.
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    mpl = os.environ['MPLCONFIGDIR']
    # relocate the matplotlib config dir under the new home
    # (assumes MPLCONFIGDIR starts with a 5-character prefix such as
    # '/tmp/' -- TODO confirm against the caller's environment setup)
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)
    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO queue of messages received on a connection: recv() enqueues
    at the front of the internal list, next_mesg() dequeues from the
    back, so messages come out in arrival order.
    """
    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove the oldest message from the queue and return it.  If
        the queue is empty, block until a message arrives and return
        that one directly (it is never placed in the queue).
        """
        if not self.queue:
            return self.conn.recv()
        return self.queue.pop()

    def recv(self):
        """
        Block until one message is received, enqueue it, and also
        return it.
        """
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.
    INPUT:
    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)
    pid = os.getpid()
    # seed the random number generator(s) -- the fork inherited the
    # parent's RNG state, so every child must re-seed.
    import sage.all; sage.all.set_random_seed()
    import random; random.seed(sage.all.initial_seed())
    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status"%os.getpid()
    cnt = 0
    while True:
        try:
            typ, mesg = mq.next_mesg()
            #print 'INFO:child%s: received message "%s"'%(pid, mesg)
            log("handling message ", truncate_text(unicode8(mesg), 400)[0])
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn = conn,
                            id = mesg['id'],
                            code = mesg['code'],
                            data = mesg.get('data',None),
                            cell_id = mesg.get('cell_id',None),
                            preparse = mesg['preparse'],
                            message_queue = mq)
                except Exception, err:
                    log("ERROR -- exception raised '%s' when executing '%s'"%(err, mesg['code']))
            elif event == 'introspect':
                try:
                    introspect(conn=conn, id=mesg['id'], line=mesg['line'], preparse=mesg['preparse'])
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'"%mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. However, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def introspect(conn, id, line, preparse):
    """
    Handle one introspection request (tab completion, docstring, or
    source-code lookup) for the given line of input and send the
    result back over the connection.
    """
    salvus = Salvus(conn=conn, id=id) # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    # NOTE(review): if none of the three flags below is set, ``mesg``
    # is never assigned and conn.send_json raises UnboundLocalError --
    # presumably sage_parsing.introspect always sets one of them; confirm.
    if z['get_completions']:
        mesg = message.introspect_completions(id=id, completions=z['result'], target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id, docstring=z['result'], target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id, source_code=z['result'], target=z['expr'])
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """Signal handler: reap every finished child process (non-blocking)."""
    while True:
        try:
            pid, exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            # no children left (or waitpid otherwise failed): done
            return
        if not pid:
            # children exist but none have exited yet
            return
# Shared secret used to authenticate incoming connections; read lazily
# from $SMC/secret_token the first time a client tries to connect.
secret_token = None
secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Authenticate a freshly accepted connection: lazily read the secret
    token from disk, then compare it against what the client sends.
    Returns True on success.  On any failure the client is sent 'n'
    plus an explanatory message, the connection is closed, and False
    is returned.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send('n')
            conn.send("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"%secret_token_path)
            conn.close()
            # BUGFIX: must bail out here.  Previously control fell
            # through with secret_token still None and len(None) below
            # raised TypeError instead of cleanly rejecting the client.
            return False
    n = len(secret_token)
    token = ''
    while len(token) < n:
        token += conn.recv(n)
        if token != secret_token[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != secret_token:
        log("token='%s'; secret_token='%s'"%(token, secret_token))
        conn.send('n')  # no -- invalid login
        conn.send("Invalid secret token.")
        conn.close()
        return False
    else:
        conn.send('y')  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle one accepted connection in the child process: authenticate
    it with the shared secret token, then either deliver a requested
    signal or start a full compute session.
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")
    try:
        # switch to length-prefixed JSON framing for the rest of the session
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s"%mesg)
    except Exception, err:
        log("Error receiving message: %s (connection terminated)"%str(err))
        raise
    if mesg['event'] == 'send_signal':
        # one-shot request: deliver a signal to another process and exit
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session."%mesg['event'])
        return
    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s"%desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """
    Main server loop: bind the listening socket, pre-import the Sage
    library into the shared namespace, then fork one child process per
    accepted connection (reaping finished children between accepts).
    INPUT:
    - port, host -- interface to listen on
    - extra_imports -- if True, also pre-import scipy/sympy and warm
      up plotting/integration
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s'%(host, port))
    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)
    def init_library():
        # One-time pre-import of the Sage library into the shared
        # namespace, plus monkey patches, so forked children start fast.
        tm = time.time()
        log("pre-importing the sage library...")
        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")
        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        import sagenb.notebook.interact
        sagenb.notebook.interact.interact = sage_salvus.interact
        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")
        # Monkey patch the html command.
        import sage.interacts.library
        sage.all.html = sage.misc.html.html = sage.interacts.library.html = sage_salvus.html
        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize']=[8,4]
        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0
        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = ['from sage.all import *',
                'from sage.calculus.predefined import x',
                'import pylab']
        if extra_imports:
            cmds.extend(['import scipy',
                         'import sympy',
                         "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                         'integrate(sin(x**2),x)'])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec cmd in namespace
        global pylab
        pylab = namespace['pylab'] # used for clearing
        log('imported sage library and other components in %s seconds'%(time.time() - tm))
        # expose the interact helper functions to worksheet code
        for k,v in sage_salvus.interact_functions.iteritems():
            namespace[k] = sagenb.notebook.interact.__dict__[k] = v
        namespace['_salvus_parsing'] = sage_parsing
        # expose the %-magics and helpers to worksheet code
        for name in ['coffeescript', 'javascript', 'time', 'timeit', 'capture', 'cython',
                     'script', 'python', 'python3', 'perl', 'ruby', 'sh', 'prun', 'show', 'auto',
                     'hide', 'hideall', 'cell', 'fork', 'exercise', 'dynamic', 'var',
                     'reset', 'restore', 'md', 'load', 'runfile', 'typeset_mode', 'default_mode',
                     'sage_chat', 'fortran', 'magics', 'go', 'julia', 'pandoc', 'wiki', 'plot3d_using_matplotlib',
                     'mediawiki', 'help', 'raw_input', 'clear', 'delete_last_output', 'sage_eval']:
            namespace[name] = getattr(sage_salvus, name)
        namespace['sage_server'] = sys.modules[__name__] # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself
        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace['pretty_print_default']
        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")
        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace['pretty_print'] = namespace['view'] = namespace['show']
        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True
    log("Initialize sage library.")
    init_library()
    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    # reap any children that have exited
                    for pid in children.keys():
                        if os.waitpid(pid, os.WNOHANG) != (0,0):
                            log("subprocess %s terminated, closing connection"%pid)
                            # NOTE(review): this closes ``conn`` (the most
                            # recently accepted connection), not
                            # children[pid] -- looks suspicious; confirm.
                            conn.close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error, msg:
                continue
            child_pid = os.fork()
            if child_pid: # parent
                log("forked off child with pid %s to handle this connection"%child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)
        # end while
    except Exception, err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=sys.stdout)
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """
    Top-level server entry point: record our pid, set the log file,
    then run serve(); the pidfile is removed again on exit.
    INPUT:
    - port, host -- where serve() should listen
    - pidfile -- path to write our pid to ('' = don't write one)
    - logfile -- optional path; when given it replaces the global LOGFILE
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        # Use a context manager so the pidfile handle is flushed and
        # closed immediately instead of being leaked until GC.
        with open(pidfile, 'w') as f:
            f.write(str(os.getpid()))
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'"%(port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, then either run as a
    # server (optionally daemonized) or as a simple test client.
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument("-p", dest="port", type=int, default=0,
                        help="port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port")
    parser.add_argument("-l", dest='log_level', type=str, default='INFO',
                        help="log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d", dest="daemon", default=False, action="store_const", const=True,
                        help="daemon mode (default: False)")
    parser.add_argument("--host", dest="host", type=str, default='127.0.0.1',
                        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile", dest="pidfile", type=str, default='',
                        help="store pid in this file")
    parser.add_argument("--logfile", dest="logfile", type=str, default='',
                        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c", dest="client", default=False, action="store_const", const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname", dest="hostname", type=str, default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile", dest="portfile", type=str, default='',
                        help="write port to this file")
    args = parser.parse_args()
    if args.daemon and not args.pidfile:
        print "%s: must specify pidfile in daemon mode"%sys.argv[0]
        sys.exit(1)
    if args.log_level:
        # log-level handling is currently disabled
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)
    if args.client:
        # test-client mode: connect to an already-running server, then exit
        client1(port=args.port if args.port else int(open(args.portfile).read()), hostname=args.hostname)
        sys.exit(0)
    if not args.port:
        # ask the OS for a free port, then release it right away
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.bind(('',0)) # pick a free port
        args.port = s.getsockname()[1]
        del s
    if args.portfile:
        open(args.portfile,'w').write(str(args.port))
    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w') # for now we clear it on restart...
        log("setting logfile to %s"%LOGFILE)
    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/artist.py | 2 | 45464 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import OrderedDict, namedtuple
from functools import wraps
import inspect
import re
import warnings
import numpy as np
import matplotlib
from . import cbook, docstring, rcParams
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
# Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
#
# https://mail.python.org/pipermail/python-list/2004-October/242925.html
def allow_rasterization(draw):
    """
    Decorator for Artist.draw methods.  Wraps the draw call with the
    renderer start/stop hooks for rasterization and agg filtering, so
    artist-dependent renderer state is set up before the draw and torn
    down afterwards (even if the draw raises).
    """
    # the axes class has a second argument inframe for its draw method,
    # hence the *args/**kwargs pass-through.
    @wraps(draw)
    def wrapper(artist, renderer, *args, **kwargs):
        try:
            if artist.get_rasterized():
                renderer.start_rasterizing()
            if artist.get_agg_filter() is not None:
                renderer.start_filter()
            return draw(artist, renderer, *args, **kwargs)
        finally:
            if artist.get_agg_filter() is not None:
                renderer.stop_filter(artist.get_agg_filter())
            if artist.get_rasterized():
                renderer.stop_rasterizing()
    wrapper._supports_rasterization = True
    return wrapper
def _stale_axes_callback(self, val):
    """Propagate an artist's staleness to the axes it lives on, if any."""
    ax = self.axes
    if ax:
        ax.stale = val
# Lightweight pair type used for Artist.sticky_edges (separate x/y lists).
_XYPair = namedtuple("_XYPair", "x y")
class Artist(object):
    """
    Abstract base class for someone who renders into a
    :class:`FigureCanvas`.
    """
    aname = 'Artist'
    # default draw order; subclasses override this class attribute
    zorder = 0
    # order of precedence when bulk setting/updating properties
    # via update. The keys should be property names and the values
    # integers
    _prop_order = dict(color=-1)
def __init__(self):
    # Artists start out stale (i.e. needing a draw) with no parent
    # figure or axes attached.
    self._stale = True
    self.stale_callback = None
    self._axes = None
    self.figure = None
    # transform state (see get_transform/set_transform)
    self._transform = None
    self._transformSet = False
    self._visible = True
    self._animated = False
    self._alpha = None
    # clipping state
    self.clipbox = None
    self._clippath = None
    self._clipon = True
    self._label = ''
    # picking / hit-testing hooks
    self._picker = None
    self._contains = None
    self._rasterized = None
    self._agg_filter = None
    self._mouseover = False
    self.eventson = False  # fire events only if eventson
    self._oid = 0  # an observer id
    self._propobservers = {}  # a dict from oids to funcs
    try:
        self.axes = None
    except AttributeError:
        # Handle self.axes as a read-only property, as in Figure.
        pass
    self._remove_method = None
    self._url = None
    self._gid = None
    self._snap = None
    # defaults pulled from rcParams at construction time
    self._sketch = rcParams['path.sketch']
    self._path_effects = rcParams['path.effects']
    self._sticky_edges = _XYPair([], [])
def __getstate__(self):
    """Return a picklable copy of __dict__ with the unpicklable
    callbacks blanked out."""
    state = dict(self.__dict__)
    # remove the unpicklable remove method; this will get re-added on
    # load (by the axes) if the artist lives on an axes.
    state['_remove_method'] = None
    state['stale_callback'] = None
    return state
def remove(self):
    """
    Remove the artist from the figure if possible. The effect
    will not be visible until the figure is redrawn, e.g., with
    :meth:`matplotlib.axes.Axes.draw_idle`. Call
    :meth:`matplotlib.axes.Axes.relim` to update the axes limits
    if desired.
    Note: :meth:`~matplotlib.axes.Axes.relim` will not see
    collections even if the collection was added to axes with
    *autolim* = True.
    Note: there is no support for removing the artist's legend entry.
    """
    # There is no method to set the callback. Instead the parent should
    # set the _remove_method attribute directly. This would be a
    # protected attribute if Python supported that sort of thing. The
    # callback has one parameter, which is the child to be removed.
    if self._remove_method is not None:
        self._remove_method(self)
        # clear stale callback
        self.stale_callback = None
        _ax_flag = False
        if hasattr(self, 'axes') and self.axes:
            # remove from the mouse hit list
            self.axes.mouseover_set.discard(self)
            # mark the axes as stale
            self.axes.stale = True
            # decouple the artist from the axes
            self.axes = None
            _ax_flag = True
        if self.figure:
            self.figure = None
            if not _ax_flag:
                # NOTE(review): figure is set to True (not a Figure) here;
                # presumably this flags figure-level staleness for artists
                # owned directly by a figure -- confirm before changing.
                self.figure = True
    else:
        raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
    """Return True if units are set on the x or y axes of the parent."""
    ax = self.axes
    if ax is not None and ax.xaxis is not None:
        return ax.xaxis.have_units() or ax.yaxis.have_units()
    return False
def convert_xunits(self, x):
    """For artists in an axes, if the xaxis has units support,
    convert *x* using the xaxis unit type; otherwise return *x*
    unchanged.
    """
    ax = getattr(self, 'axes', None)
    if ax is not None and ax.xaxis is not None:
        return ax.xaxis.convert_units(x)
    return x
def convert_yunits(self, y):
    """For artists in an axes, if the yaxis has units support,
    convert *y* using the yaxis unit type; otherwise return *y*
    unchanged.
    """
    ax = getattr(self, 'axes', None)
    if ax is not None and ax.yaxis is not None:
        return ax.yaxis.convert_units(y)
    return y
@property
def axes(self):
    """
    The :class:`~matplotlib.axes.Axes` instance the artist
    resides in, or *None*.
    """
    return self._axes

@axes.setter
def axes(self, new_axes):
    # An artist may belong to at most one Axes over its lifetime.
    if (new_axes is not None and self._axes is not None
            and new_axes != self._axes):
        raise ValueError("Can not reset the axes. You are probably "
                         "trying to re-use an artist in more than one "
                         "Axes which is not supported")
    self._axes = new_axes
    if new_axes is not None and new_axes is not self:
        # hook staleness propagation to the new parent axes
        self.stale_callback = _stale_axes_callback
    # NOTE(review): the return value of a property setter is discarded
    # by attribute assignment; this return has no effect.
    return new_axes
@property
def stale(self):
    """
    If the artist is 'stale' and needs to be re-drawn for the output to
    match the internal state of the artist.
    """
    return self._stale

@stale.setter
def stale(self, val):
    self._stale = val
    # if the artist is animated it does not take normal part in the
    # draw stack and is not expected to be drawn as part of the normal
    # draw loop (when not saving) so do not propagate this change
    if self.get_animated():
        return
    # only propagate when becoming stale and a callback is installed
    if val and self.stale_callback is not None:
        self.stale_callback(self, val)
def get_window_extent(self, renderer):
    """
    Get the axes bounding box in display space.
    Subclasses should override for inclusion in the bounding box
    "tight" calculation. Default is to return an empty bounding
    box at 0, 0.
    Be careful when using this function, the results will not update
    if the artist window extent of the artist changes. The extent
    can change due to any changes in the transform stack, such as
    changing the axes limits, the figure size, or the canvas used
    (as is done when saving a figure). This can lead to unexpected
    behavior where interactive figures will look fine on the screen,
    but will save incorrectly.
    """
    # degenerate (empty) Bbox -- contributes nothing to tight bboxes
    return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
    """
    Register *func* to be called whenever one of this Artist's
    properties changes.
    Returns an *id* that can be passed to :meth:`remove_callback` to
    deregister it later.
    """
    cid = self._oid
    self._propobservers[cid] = func
    self._oid = cid + 1
    return cid
def remove_callback(self, oid):
    """
    Remove a callback previously registered with :meth:`add_callback`,
    identified by its *id*.  Unknown ids are silently ignored.
    .. seealso::
        :meth:`add_callback`
        For adding callbacks
    """
    # pop with a default is the EAFP-free equivalent of
    # try/del/except KeyError: pass
    self._propobservers.pop(oid, None)
def pchanged(self):
    """
    Fire an event when property changed, calling all of the
    registered callbacks.
    """
    # each callback was registered via add_callback and receives the
    # artist itself as its single argument
    for oid, func in six.iteritems(self._propobservers):
        func(self)
def is_transform_set(self):
    """Return True if a transform has been explicitly set on this artist."""
    return self._transformSet
def set_transform(self, t):
    """
    Set the :class:`~matplotlib.transforms.Transform` instance
    used by this artist.
    ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
    """
    self._transform = t
    self._transformSet = True
    # notify observers and mark the artist for re-draw
    self.pchanged()
    self.stale = True
def get_transform(self):
    """
    Return the :class:`~matplotlib.transforms.Transform`
    instance used by this artist.
    """
    # lazily default to the identity transform
    if self._transform is None:
        self._transform = IdentityTransform()
    elif (not isinstance(self._transform, Transform)
          and hasattr(self._transform, '_as_mpl_transform')):
        # objects implementing the ``_as_mpl_transform`` protocol are
        # converted to a real Transform bound to this artist's axes
        self._transform = self._transform._as_mpl_transform(self.axes)
    return self._transform
def hitlist(self, event):
    """
    List the children of the artist which contain the mouse event *event*.
    A broken ``contains`` implementation in one artist is reported but
    does not abort the traversal of the remaining children.
    """
    L = []
    try:
        hascursor, info = self.contains(event)
        if hascursor:
            L.append(self)
    except Exception:
        # was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only trap real errors here
        import traceback
        traceback.print_exc()
        print("while checking", self.__class__)
    for a in self.get_children():
        L.extend(a.hitlist(event))
    return L
def get_children(self):
    """
    Return a list of the child :class:`Artist`'s this
    :class:`Artist` contains.
    The base class has no children; container artists override this.
    """
    return []
def contains(self, mouseevent):
    """Test whether the artist contains the mouse event.
    Returns the truth value and a dictionary of artist specific details of
    selection, such as which points are contained in the pick radius.  See
    individual artists for details.
    """
    # a custom test installed via set_contains takes precedence
    if callable(self._contains):
        return self._contains(self, mouseevent)
    # the base class has no geometry, so it cannot answer the question
    warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
    return False, {}
def set_contains(self, picker):
    """
    Replace the contains test used by this artist. The new picker
    should be a callable function which determines whether the
    artist is hit by the mouse event::
        hit, props = picker(artist, mouseevent)
    If the mouse event is over the artist, return *hit* = *True*
    and *props* is a dictionary of properties you want returned
    with the contains test.
    ACCEPTS: a callable function
    """
    self._contains = picker
def get_contains(self):
    """
    Return the custom contains test installed via :meth:`set_contains`,
    or *None* if the default test is in use.
    """
    return self._contains
def pickable(self):
    """Return *True* if :class:`Artist` is pickable.
    Requires an attached figure with a canvas and a picker set.
    """
    # De Morgan form of: figure set AND canvas set AND picker set
    return not (self.figure is None or
                self.figure.canvas is None or
                self._picker is None)
def pick(self, mouseevent):
    """
    Process a pick event.
    Each child artist will fire a pick event if *mouseevent* is over
    the artist and the artist has its picker set.
    """
    # Pick self
    if self.pickable():
        picker = self.get_picker()
        if callable(picker):
            inside, prop = picker(self, mouseevent)
        else:
            inside, prop = self.contains(mouseevent)
        if inside:
            self.figure.canvas.pick_event(mouseevent, self, **prop)
    # Pick children
    for a in self.get_children():
        # make sure the event happened in the same axes
        ax = getattr(a, 'axes', None)
        if (mouseevent.inaxes is None or ax is None
                or mouseevent.inaxes == ax):
            # we need to check if mouseevent.inaxes is None
            # because some objects associated with an axes (e.g., a
            # tick label) can be outside the bounding box of the
            # axes and inaxes will be None
            # also check that ax is None so that we traverse objects
            # which do not have an axes property but whose children might
            a.pick(mouseevent)
def set_picker(self, picker):
    """
    Set the epsilon for picking used by this artist
    *picker* can be one of the following:
      * *None*: picking is disabled for this artist (default)
      * A boolean: if *True* then picking will be enabled and the
        artist will fire a pick event if the mouse event is over
        the artist
      * A float: if picker is a number it is interpreted as an
        epsilon tolerance in points and the artist will fire
        off an event if its data is within epsilon of the mouse
        event.  For some artists like lines and patch collections,
        the artist may provide additional data to the pick event
        that is generated, e.g., the indices of the data within
        epsilon of the pick event
      * A function: if picker is callable, it is a user supplied
        function which determines whether the artist is hit by the
        mouse event::
          hit, props = picker(artist, mouseevent)
        to determine the hit test.  if the mouse event is over the
        artist, return *hit=True* and props is a dictionary of
        properties you want added to the PickEvent attributes.
    ACCEPTS: [None|float|boolean|callable]
    """
    self._picker = picker
def get_picker(self):
    """Return the picker object used by this artist.
    See :meth:`set_picker` for the possible types.
    """
    return self._picker
def is_figure_set(self):
    """
    Returns True if the artist is assigned to a
    :class:`~matplotlib.figure.Figure`.
    """
    return self.figure is not None
def get_url(self):
    """
    Returns the url set for this artist, or *None* if unset.
    """
    return self._url
def set_url(self, url):
    """
    Sets the url for the artist.
    ACCEPTS: a url string
    """
    self._url = url
def get_gid(self):
    """
    Returns the group id, or *None* if unset.
    """
    return self._gid
def set_gid(self, gid):
    """
    Sets the (group) id for the artist.
    ACCEPTS: an id string
    """
    self._gid = gid
def get_snap(self):
    """
    Returns the snap setting which may be:
      * True: snap vertices to the nearest pixel center
      * False: leave vertices as-is
      * None: (auto) If the path contains only rectilinear line
        segments, round to the nearest pixel center
    Only supported by the Agg and MacOSX backends.
    """
    # snapping can be globally disabled through rcParams
    if not rcParams['path.snap']:
        return False
    return self._snap
def set_snap(self, snap):
    """
    Sets the snap setting which may be:
      * True: snap vertices to the nearest pixel center
      * False: leave vertices as-is
      * None: (auto) If the path contains only rectilinear line
        segments, round to the nearest pixel center
    Only supported by the Agg and MacOSX backends.
    """
    self._snap = snap
    self.stale = True
def get_sketch_params(self):
    """
    Returns the sketch parameters for the artist.
    Returns
    -------
    sketch_params : tuple or `None`
    A 3-tuple with the following elements:
      * `scale`: The amplitude of the wiggle perpendicular to the
        source line.
      * `length`: The length of the wiggle along the line.
      * `randomness`: The scale factor by which the length is
        shrunken or expanded.
    May return `None` if no sketch parameters were set.
    """
    return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
    """
    Sets the sketch parameters.
    Parameters
    ----------
    scale : float, optional
        The amplitude of the wiggle perpendicular to the source
        line, in pixels.  If scale is `None`, or not provided, no
        sketch filter will be provided.
    length : float, optional
        The length of the wiggle along the line, in pixels
        (default 128.0)
    randomness : float, optional
        The scale factor by which the length is shrunken or
        expanded (default 16.0)
    """
    if scale is None:
        self._sketch = None
    else:
        # Explicit ``is None`` checks: the previous ``length or 128.0``
        # treated an explicit 0.0 as "missing" and silently replaced it
        # with the default.
        if length is None:
            length = 128.0
        if randomness is None:
            randomness = 16.0
        self._sketch = (scale, length, randomness)
    self.stale = True
def set_path_effects(self, path_effects):
    """
    Set path_effects, which should be a list of instances of the
    matplotlib.patheffect._Base class or its derivatives.
    """
    self._path_effects = path_effects
    self.stale = True
def get_path_effects(self):
    """Return the list of path effects set on this artist."""
    return self._path_effects
def get_figure(self):
    """
    Return the :class:`~matplotlib.figure.Figure` instance the
    artist belongs to, or *None* if not attached.
    """
    return self.figure
def set_figure(self, fig):
    """
    Set the :class:`~matplotlib.figure.Figure` instance the artist
    belongs to.
    Raises RuntimeError if the artist is already attached to a
    different figure.
    ACCEPTS: a :class:`matplotlib.figure.Figure` instance
    """
    # if this is a no-op just return
    if self.figure is fig:
        return
    # if we currently have a figure (the case of both `self.figure`
    # and `fig` being None is taken care of above) then the user is
    # trying to change the figure an artist is associated with, which
    # is not allowed for the same reason as adding the same instance
    # to more than one Axes
    if self.figure is not None:
        raise RuntimeError("Can not put single artist in "
                           "more than one figure")
    self.figure = fig
    # a Figure is itself an Artist; avoid notifying itself recursively
    if self.figure and self.figure is not self:
        self.pchanged()
    self.stale = True
def set_clip_box(self, clipbox):
    """
    Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
    ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
    """
    self.clipbox = clipbox
    self.pchanged()
    self.stale = True
def set_clip_path(self, path, transform=None):
    """
    Set the artist's clip path, which may be:
    - a :class:`~matplotlib.patches.Patch` (or subclass) instance; or
    - a :class:`~matplotlib.path.Path` instance, in which case a
      :class:`~matplotlib.transforms.Transform` instance, which will be
      applied to the path before using it for clipping, must be provided;
      or
    - ``None``, to remove a previously set clipping path.
    For efficiency, if the path happens to be an axis-aligned rectangle,
    this method will set the clipping box to the corresponding rectangle
    and set the clipping path to ``None``.
    Raises TypeError for argument combinations not listed above.
    ACCEPTS: [ (:class:`~matplotlib.path.Path`,
    :class:`~matplotlib.transforms.Transform`) |
    :class:`~matplotlib.patches.Patch` | None ]
    """
    from matplotlib.patches import Patch, Rectangle
    success = False
    if transform is None:
        if isinstance(path, Rectangle):
            # fast path: an axis-aligned rectangle clips via the bbox
            self.clipbox = TransformedBbox(Bbox.unit(),
                                           path.get_transform())
            self._clippath = None
            success = True
        elif isinstance(path, Patch):
            self._clippath = TransformedPatchPath(path)
            success = True
        elif isinstance(path, tuple):
            # unpack a (path, transform) pair and fall through to the
            # checks below
            path, transform = path
    if path is None:
        self._clippath = None
        success = True
    elif isinstance(path, Path):
        self._clippath = TransformedPath(path, transform)
        success = True
    elif isinstance(path, TransformedPatchPath):
        self._clippath = path
        success = True
    elif isinstance(path, TransformedPath):
        self._clippath = path
        success = True
    if not success:
        raise TypeError(
            "Invalid arguments to set_clip_path, of type {} and {}"
            .format(type(path).__name__, type(transform).__name__))
    # This may result in the callbacks being hit twice, but guarantees they
    # will be hit at least once.
    self.pchanged()
    self.stale = True
def get_alpha(self):
    """
    Return the alpha value used for blending - not supported on all
    backends.
    """
    return self._alpha
def get_visible(self):
    """Return the artist's visibility."""
    return self._visible
def get_animated(self):
    """Return the artist's animated state."""
    return self._animated
def get_clip_on(self):
    """Return whether the artist uses clipping."""
    return self._clipon
def get_clip_box(self):
    """Return the artist's clipbox."""
    return self.clipbox
def get_clip_path(self):
    """Return the artist's clip path."""
    return self._clippath
def get_transformed_clip_path_and_affine(self):
    """
    Return the clip path with the non-affine part of its
    transformation applied, and the remaining affine part of its
    transformation.  Returns ``(None, None)`` when no clip path is set.
    """
    clippath = self._clippath
    if clippath is None:
        return None, None
    return clippath.get_transformed_path_and_affine()
def set_clip_on(self, b):
    """
    Set whether artist uses clipping.
    When False artists will be visible outside of the axes which
    can lead to unexpected results.
    ACCEPTS: [True | False]
    """
    self._clipon = b
    # This may result in the callbacks being hit twice, but ensures they
    # are hit at least once
    self.pchanged()
    self.stale = True
def _set_gc_clip(self, gc):
    """Set the clip properly for the graphics context *gc*."""
    if self._clipon:
        if self.clipbox is not None:
            gc.set_clip_rectangle(self.clipbox)
        gc.set_clip_path(self._clippath)
    else:
        # clipping disabled: clear any clip state on the context
        gc.set_clip_rectangle(None)
        gc.set_clip_path(None)
def get_rasterized(self):
    """Return True if the artist is to be rasterized."""
    return self._rasterized
def set_rasterized(self, rasterized):
    """
    Force rasterized (bitmap) drawing in vector backend output.
    Defaults to None, which implies the backend's default behavior.
    ACCEPTS: [True | False | None]
    """
    # draw() methods that honor rasterization are tagged with the
    # _supports_rasterization attribute; warn if this one is not
    if rasterized and not hasattr(self.draw, "_supports_rasterization"):
        warnings.warn("Rasterization of '%s' will be ignored" % self)
    self._rasterized = rasterized
def get_agg_filter(self):
    """Return the filter function to be used for agg filter."""
    return self._agg_filter
def set_agg_filter(self, filter_func):
    """
    Set the agg_filter function.
    """
    self._agg_filter = filter_func
    self.stale = True
def draw(self, renderer, *args, **kwargs):
    """Derived classes drawing method.
    The base implementation only clears the stale flag (and skips even
    that when the artist is not visible).
    """
    if not self.get_visible():
        return
    self.stale = False
def set_alpha(self, alpha):
    """
    Set the alpha value used for blending - not supported on
    all backends.
    ACCEPTS: float (0.0 transparent through 1.0 opaque)
    """
    self._alpha = alpha
    self.pchanged()
    self.stale = True
def set_visible(self, b):
    """
    Set the artist's visibility.
    ACCEPTS: [True | False]
    """
    self._visible = b
    self.pchanged()
    self.stale = True
def set_animated(self, b):
    """
    Set the artist's animation state.
    ACCEPTS: [True | False]
    """
    # only notify observers on an actual state change
    if self._animated != b:
        self._animated = b
        self.pchanged()
def update(self, props):
    """
    Update the properties of this :class:`Artist` from the
    dictionary *props*.
    Returns a list with the return value of each property update
    (the return of the ``set_*`` method, or ``None`` for whitelisted
    plain attributes).  Raises AttributeError for unknown properties.
    """
    def _update_property(self, k, v):
        """sorting out how to update property (setter or setattr)
        Parameters
        ----------
        k : str
            The name of property to update
        v : obj
            The value to assign to the property
        Returns
        -------
        ret : obj or None
            If using a `set_*` method return its return, else None.
        """
        k = k.lower()
        # white list attributes we want to be able to update through
        # art.update, art.set, setp
        if k in {'axes'}:
            return setattr(self, k, v)
        else:
            func = getattr(self, 'set_' + k, None)
            if not callable(func):
                raise AttributeError('Unknown property %s' % k)
            return func(v)
    # suppress per-property change events; fire a single pchanged at
    # the end instead
    store = self.eventson
    self.eventson = False
    try:
        ret = [_update_property(self, k, v)
               for k, v in props.items()]
    finally:
        self.eventson = store
    if ret:
        self.pchanged()
        self.stale = True
    return ret
def get_label(self):
    """
    Get the label used for this artist in the legend.
    """
    return self._label
def set_label(self, s):
    """
    Set the label to *s* for auto legend.
    ACCEPTS: string or anything printable with '%s' conversion.
    """
    # stringify via %s unless clearing the label entirely
    self._label = None if s is None else '%s' % (s, )
    self.pchanged()
    self.stale = True
def get_zorder(self):
    """
    Return the :class:`Artist`'s zorder.
    """
    return self.zorder
def set_zorder(self, level):
    """
    Set the zorder for the artist.  Artists with lower zorder
    values are drawn first.
    ACCEPTS: any number
    """
    self.zorder = level
    self.pchanged()
    self.stale = True
@property
def sticky_edges(self):
    """
    `x` and `y` sticky edge lists.
    When performing autoscaling, if a data limit coincides with a value in
    the corresponding sticky_edges list, then no margin will be added--the
    view limit "sticks" to the edge.  A typical usecase is histograms,
    where one usually expects no margin on the bottom edge (0) of the
    histogram.
    This attribute cannot be assigned to; however, the `x` and `y` lists
    can be modified in place as needed.
    Examples
    --------
    >>> artist.sticky_edges.x[:] = (xmin, xmax)
    >>> artist.sticky_edges.y[:] = (ymin, ymax)
    """
    return self._sticky_edges
def update_from(self, other):
    """Copy properties from *other* to *self*."""
    # straight attribute copies
    for attr in ('_transform', '_transformSet', '_visible', '_alpha',
                 'clipbox', '_clipon', '_clippath', '_label',
                 '_sketch', '_path_effects'):
        setattr(self, attr, getattr(other, attr))
    # sticky edges are copied element-wise so the list objects owned by
    # self are preserved
    self.sticky_edges.x[:] = other.sticky_edges.x[:]
    self.sticky_edges.y[:] = other.sticky_edges.y[:]
    self.pchanged()
    self.stale = True
def properties(self):
    """
    Return a dictionary mapping property name -> value for all Artist
    props (delegates to :class:`ArtistInspector`).
    """
    return ArtistInspector(self).properties()
def set(self, **kwargs):
    """A property batch setter. Pass *kwargs* to set properties.
    """
    # Apply properties in a deterministic, priority-aware order
    # (``_prop_order`` presumably ranks properties that must be applied
    # before ones that depend on them -- TODO confirm against the class
    # attribute, which is defined outside this block).
    props = OrderedDict(
        sorted(kwargs.items(), reverse=True,
               key=lambda x: (self._prop_order.get(x[0], 0), x[0])))
    return self.update(props)
def findobj(self, match=None, include_self=True):
    """
    Find artist objects.
    Recursively find all :class:`~matplotlib.artist.Artist` instances
    contained in self.
    *match* can be
      - None: return all objects contained in artist.
      - function with signature ``boolean = match(artist)``
        used to filter matches
      - class instance: e.g., Line2D.  Only return artists of class type.
    If *include_self* is True (default), include self in the list to be
    checked for a match.
    Raises ValueError for any other *match* argument.
    """
    if match is None:  # always return True
        def matchfunc(x):
            return True
    elif isinstance(match, type) and issubclass(match, Artist):
        def matchfunc(x):
            return isinstance(x, match)
    elif callable(match):
        matchfunc = match
    else:
        raise ValueError('match must be None, a matplotlib.artist.Artist '
                         'subclass, or a callable')
    # flatten the children's results with a comprehension;
    # sum(..., []) re-concatenated lists quadratically
    artists = [thisArtist
               for c in self.get_children()
               for thisArtist in c.findobj(matchfunc)]
    if include_self and matchfunc(self):
        artists.append(self)
    return artists
def get_cursor_data(self, event):
    """
    Get the cursor data for a given event.
    The base class has no data, so it always returns *None*; subclasses
    with data override this.
    """
    return None
def format_cursor_data(self, data):
    """
    Return *cursor data* formatted as a comma-separated string.
    Non-numeric items are silently dropped; a scalar is treated as a
    one-element sequence.
    """
    try:
        data[0]
    except (TypeError, IndexError):
        # scalar (or otherwise unindexable) input: wrap it
        data = [data]
    numeric_types = (np.floating, np.integer, int, float)
    parts = ['{:0.3g}'.format(item) for item in data
             if isinstance(item, numeric_types)]
    return ', '.join(parts)
@property
def mouseover(self):
    """bool: whether this artist participates in mouse-over events."""
    return self._mouseover
@mouseover.setter
def mouseover(self, val):
    val = bool(val)
    self._mouseover = val
    # keep the owning axes' mouseover registry in sync, if attached
    ax = self.axes
    if ax:
        if val:
            ax.mouseover_set.add(self)
        else:
            ax.mouseover_set.discard(self)
class ArtistInspector(object):
    """
    A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
    current values.
    """
    def __init__(self, o):
        """
        Initialize the artist inspector with an
        :class:`~matplotlib.artist.Artist` or iterable of :class:`Artists`.
        If an iterable is used, we assume it is a homogeneous sequence (all
        :class:`Artists` are of the same type) and it is your responsibility
        to make sure this is so.
        """
        if cbook.iterable(o):
            # Wrapped in list instead of doing try-except around next(iter(o))
            o = list(o)
            if len(o):
                o = o[0]
        self.oorig = o
        if not inspect.isclass(o):
            o = type(o)
        self.o = o
        self.aliasd = self.get_aliases()
    def get_aliases(self):
        """
        Get a dict mapping *fullname* -> *alias* for each *alias* in
        the :class:`~matplotlib.artist.ArtistInspector`.
        e.g., for lines::
          {'markerfacecolor': 'mfc',
           'linewidth'      : 'lw',
          }
        """
        names = [name for name in dir(self.o)
                 if name.startswith(('set_', 'get_'))
                 and callable(getattr(self.o, name))]
        aliases = {}
        for name in names:
            func = getattr(self.o, name)
            if not self.is_alias(func):
                continue
            # alias docstrings have the form 'alias for set_<fullname>'
            docstring = func.__doc__
            fullname = docstring[10:]
            aliases.setdefault(fullname[4:], {})[name[4:]] = None
        return aliases
    _get_valid_values_regex = re.compile(
        r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
    )
    def get_valid_values(self, attr):
        """
        Get the legal arguments for the setter associated with *attr*.
        This is done by querying the docstring of the function *set_attr*
        for a line that begins with ACCEPTS:
        e.g., for a line linestyle, return
        "[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
        ]"
        """
        name = 'set_%s' % attr
        if not hasattr(self.o, name):
            raise AttributeError('%s has no function %s' % (self.o, name))
        func = getattr(self.o, name)
        docstring = func.__doc__
        if docstring is None:
            return 'unknown'
        if docstring.startswith('alias for '):
            return None
        match = self._get_valid_values_regex.search(docstring)
        if match is not None:
            return re.sub("\n *", " ", match.group(1))
        return 'unknown'
    def _get_setters_and_targets(self):
        """
        Get the attribute strings and a full path to where the setter
        is defined for all setters in an object.
        """
        setters = []
        for name in dir(self.o):
            if not name.startswith('set_'):
                continue
            func = getattr(self.o, name)
            if not callable(func):
                continue
            if six.PY2:
                nargs = len(inspect.getargspec(func)[0])
            else:
                nargs = len(inspect.getfullargspec(func)[0])
            # skip setters that take no value argument, and aliases
            if nargs < 2 or self.is_alias(func):
                continue
            # find the class in the MRO that actually defines the setter
            source_class = self.o.__module__ + "." + self.o.__name__
            for cls in self.o.mro():
                if name in cls.__dict__:
                    source_class = cls.__module__ + "." + cls.__name__
                    break
            setters.append((name[4:], source_class + "." + name))
        return setters
    def get_setters(self):
        """
        Get the attribute strings with setters for object.  e.g., for a line,
        return ``['markerfacecolor', 'linewidth', ....]``.
        """
        return [prop for prop, target in self._get_setters_and_targets()]
    def is_alias(self, o):
        """
        Return *True* if method object *o* is an alias for another
        function.
        """
        ds = o.__doc__
        if ds is None:
            return False
        return ds.startswith('alias for ')
    def aliased_name(self, s):
        """
        return 'PROPNAME or alias' if *s* has an alias, else return
        PROPNAME.
        e.g., for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'
        """
        if s in self.aliasd:
            return s + ''.join([' or %s' % x
                                for x in sorted(self.aliasd[s])])
        else:
            return s
    def aliased_name_rest(self, s, target):
        """
        return 'PROPNAME or alias' if *s* has an alias, else return
        PROPNAME formatted for ReST
        e.g., for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'
        """
        if s in self.aliasd:
            aliases = ''.join([' or %s' % x
                               for x in sorted(self.aliasd[s])])
        else:
            aliases = ''
        return ':meth:`%s <%s>`%s' % (s, target, aliases)
    def pprint_setters(self, prop=None, leadingspace=2):
        """
        If *prop* is *None*, return a list of strings of all settable
        properties and their valid values.
        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of property : valid
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return '%s%s: %s' % (pad, prop, accepts)
        attrs = self._get_setters_and_targets()
        attrs.sort()
        lines = []
        for prop, path in attrs:
            accepts = self.get_valid_values(prop)
            name = self.aliased_name(prop)
            lines.append('%s%s: %s' % (pad, name, accepts))
        return lines
    def pprint_setters_rest(self, prop=None, leadingspace=2):
        """
        If *prop* is *None*, return a list of strings of all settable
        properties and their valid values.  Format the output for ReST
        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of property : valid
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return '%s%s: %s' % (pad, prop, accepts)
        attrs = self._get_setters_and_targets()
        attrs.sort()
        lines = []
        ########
        names = [self.aliased_name_rest(prop, target)
                 for prop, target in attrs]
        accepts = [self.get_valid_values(prop) for prop, target in attrs]
        col0_len = max(len(n) for n in names)
        col1_len = max(len(a) for a in accepts)
        table_formatstr = pad + '=' * col0_len + '   ' + '=' * col1_len
        lines.append('')
        lines.append(table_formatstr)
        lines.append(pad + 'Property'.ljust(col0_len + 3) +
                     'Description'.ljust(col1_len))
        lines.append(table_formatstr)
        lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
                      for n, a in zip(names, accepts)])
        lines.append(table_formatstr)
        lines.append('')
        return lines
    def properties(self):
        """
        return a dictionary mapping property name -> value
        """
        o = self.oorig
        getters = [name for name in dir(o)
                   if name.startswith('get_') and callable(getattr(o, name))]
        getters.sort()
        d = dict()
        for name in getters:
            func = getattr(o, name)
            if self.is_alias(func):
                continue
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    val = func()
            except Exception:
                # was a bare ``except:``; a getter that raises is simply
                # skipped, but KeyboardInterrupt etc. must not be eaten
                continue
            else:
                d[name[4:]] = val
        return d
    def pprint_getters(self):
        """
        Return the getters and actual values as list of strings.
        """
        lines = []
        for name, val in sorted(six.iteritems(self.properties())):
            if getattr(val, 'shape', ()) != () and len(val) > 6:
                # abbreviate long array-like values
                s = str(val[:6]) + '...'
            else:
                s = str(val)
            s = s.replace('\n', ' ')
            if len(s) > 50:
                s = s[:50] + '...'
            name = self.aliased_name(name)
            lines.append('    %s = %s' % (name, s))
        return lines
def getp(obj, property=None):
    """
    Return the value of object's property.  *property* is an optional string
    for the property you want to return
    Example usage::
        getp(obj)  # get all the object properties
        getp(obj, 'linestyle')  # get the linestyle property
    *obj* is a :class:`Artist` instance, e.g.,
    :class:`~matplotllib.lines.Line2D` or an instance of a
    :class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
    If the *property* is 'somename', this function returns
    obj.get_somename()
    :func:`getp` can be used to query all the gettable properties with
    ``getp(obj)``. Many properties have aliases for shorter typing, e.g.
    'lw' is an alias for 'linewidth'.  In the output, aliases and full
    property names will be listed as:
      property or alias = value
    e.g.:
      linewidth or lw = 2
    """
    if property is None:
        # no property requested: dump every gettable property
        insp = ArtistInspector(obj)
        print('\n'.join(insp.pprint_getters()))
        return
    return getattr(obj, 'get_' + property)()
# alias
get = getp
def setp(obj, *args, **kwargs):
    """
    Set a property on an artist object.
    matplotlib supports the use of :func:`setp` ("set property") and
    :func:`getp` to set and get object properties, as well as to do
    introspection on the object.  For example, to set the linestyle of a
    line to be dashed, you can do::
      >>> line, = plot([1,2,3])
      >>> setp(line, linestyle='--')
    If you want to know the valid types of arguments, you can provide
    the name of the property you want to set without a value::
      >>> setp(line, 'linestyle')
          linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
    If you want to see all the properties that can be set, and their
    possible values, you can do::
      >>> setp(line)
          ... long output listing omitted
    You may specify another output file to `setp` if `sys.stdout` is not
    acceptable for some reason using the `file` keyword-only argument::
      >>> with fopen('output.log') as f:
      >>>     setp(line, file=f)
    :func:`setp` operates on a single instance or a iterable of
    instances.  If you are in query mode introspecting the possible
    values, only the first instance in the sequence is used.  When
    actually setting values, all the instances will be set.  e.g.,
    suppose you have a list of two lines, the following will make both
    lines thicker and red::
      >>> x = arange(0,1.0,0.01)
      >>> y1 = sin(2*pi*x)
      >>> y2 = sin(4*pi*x)
      >>> lines = plot(x, y1, x, y2)
      >>> setp(lines, linewidth=2, color='r')
    :func:`setp` works with the MATLAB style string/value pairs or
    with python kwargs.  For example, the following are equivalent::
      >>> setp(lines, 'linewidth', 2, 'color', 'r')  # MATLAB style
      >>> setp(lines, linewidth=2, color='r')        # python style
    """
    # normalize the input to a flat list of artists
    if not cbook.iterable(obj):
        objs = [obj]
    else:
        objs = list(cbook.flatten(obj))
    if not objs:
        return
    insp = ArtistInspector(objs[0])
    # file has to be popped before checking if kwargs is empty
    printArgs = {}
    if 'file' in kwargs:
        printArgs['file'] = kwargs.pop('file')
    # query mode: no kwargs and at most one positional arg -> print
    # the settable properties (of the first artist) instead of setting
    if not kwargs and len(args) < 2:
        if args:
            print(insp.pprint_setters(prop=args[0]), **printArgs)
        else:
            print('\n'.join(insp.pprint_setters()), **printArgs)
        return
    if len(args) % 2:
        raise ValueError('The set args must be string, value pairs')
    # put args into ordereddict to maintain order
    funcvals = OrderedDict()
    for i in range(0, len(args) - 1, 2):
        funcvals[args[i]] = args[i + 1]
    # apply the positional pairs first, then the kwargs, to every artist
    ret = [o.update(funcvals) for o in objs]
    ret.extend([o.set(**kwargs) for o in objs])
    return [x for x in cbook.flatten(ret)]
def kwdoc(a):
    """
    Return a string listing the settable properties of artist (or artist
    class) *a*, formatted as ReST when ``docstring.hardcopy`` is enabled
    in rcParams, or as plain text otherwise.
    """
    insp = ArtistInspector(a)
    if matplotlib.rcParams['docstring.hardcopy']:
        return '\n'.join(insp.pprint_setters_rest(leadingspace=2))
    return '\n'.join(insp.pprint_setters(leadingspace=2))
docstring.interpd.update(Artist=kwdoc(Artist))
_get_axes_msg = """{0} has been deprecated in mpl 1.5, please use the
axes property. A removal date has not been set."""
| mit |
jakobworldpeace/scikit-learn | sklearn/linear_model/coordinate_descent.py | 1 | 80266 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.  Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication
    y : ndarray, shape (n_samples,)
        Target values
    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.
    l1_ratio : float
        The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
        supported) ``For l1_ratio = 1`` it is an L1 penalty.  For
        ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : boolean, default True
        Whether to fit an intercept or not
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    """
    if l1_ratio == 0:
        raise ValueError("Automatic alpha grid generation is not supported for"
                         " l1_ratio=0. Please supply a grid by providing "
                         "your estimator with the appropriate `alphas=` "
                         "argument.")
    n_samples = len(y)
    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
                                             normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)
        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                          normalize,
                                                          return_mean=True)
            mean_dot = X_offset * np.sum(y)
    if Xy.ndim == 1:
        # promote to 2-D so the per-output norms below work uniformly
        Xy = Xy[:, np.newaxis]
    if sparse_center:
        # apply the centering/scaling to the precomputed Xy instead of X
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_scale[:, np.newaxis]
    # largest alpha on the grid: max row norm of Xy / (n_samples * l1_ratio)
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))
    if alpha_max <= np.finfo(float).resolution:
        # degenerate case (e.g. y orthogonal to X): return a constant grid
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas
    # log-spaced grid from alpha_max down to alpha_max * eps
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute the Lasso regularization path with coordinate descent.

    For a single output the objective minimized at each alpha is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    and for multiple outputs::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of the Euclidean norms of the rows of ``W``.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values.

    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path.

    alphas : ndarray, optional
        Values of alpha at which to compute the models. If ``None``, the
        alphas are set automatically.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up calculations.
        If set to ``'auto'`` let us decide. The Gram matrix can also be
        passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful only when
        the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        Initial values for the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    return_n_iter : bool
        Whether to also return the number of iterations.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    params : kwargs
        Keyword arguments forwarded to the coordinate descent solver.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer
        to reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The Lasso is the elastic net restricted to a pure L1 penalty, so
    # delegate to enet_path with the mixing parameter pinned at l1_ratio=1.
    return enet_path(
        X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas,
        precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init,
        verbose=verbose, positive=positive, return_n_iter=return_n_iter,
        **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False,
              check_input=True, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided
        assuming they are handled by the caller when check_input=False.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py for an
    example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already Fortran ordered when bypassing
    # checks
    if check_input:
        X = check_array(X, 'csc', dtype=[np.float64, np.float32],
                        order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            # Xy should be a 1d contiguous array or a 2D C ordered array
            Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape
    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape
    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_offset' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_offset'] / params['X_scale']
            X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
        else:
            X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if check_input:
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False, copy=False)
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    n_alphas = len(alphas)
    # Solver options may be passed through **params; fall back to the same
    # defaults as the ElasticNet estimator.
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []
    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')
    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=X.dtype)
    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
    else:
        coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
    # Walk the path from the largest alpha down, warm-starting each solve
    # with coef_ from the previous (more regularized) alpha.
    for i, alpha in enumerate(alphas):
        # The Cython CD solvers expect penalties pre-scaled by n_samples.
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, dtype=X.dtype.type,
                                         order='C')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like. Got %r" % precompute)
        # eps_ is the solver's scaled tolerance; a dual gap above it means
        # the requested tolerance was not reached within max_iter.
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations.' +
                          ' Fitting data with very small alpha' +
                          ' may cause precision problems.',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.

    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the penalty terms. Defaults to 1.0.
        See the notes for the exact mathematical meaning of this
        parameter.``alpha = 0`` is equivalent to an ordinary least square,
        solved by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    precompute : True | False | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve sparsity.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # The regularization-path function used by fit(); LinearModelCV
    # subclasses rely on the same attribute name.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Per scikit-learn convention, __init__ only stores the parameters
        # unmodified; all validation happens in fit().
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.

        Parameters
        ----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            The fitted estimator (for chaining calls).

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        # 'auto' (or any string) precompute is not supported by this
        # estimator; only True/False/array-like are valid.
        if isinstance(self.precompute, six.string_types):
            raise ValueError('precompute should be one of True, False or'
                             ' array-like. Got %r' % self.precompute)
        # We expect X and y to be float64 or float32 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X, y = check_X_y(X, y, accept_sparse='csc',
                             order='F', dtype=[np.float64, np.float32],
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
            y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
                            ensure_2d=False)
        # Center/scale X and y and (possibly) compute the Gram matrix once,
        # so the per-target path calls below can skip their own checks.
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False)
        # Normalize to the 2-D multi-target layout internally.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        # warm_start reuses the previous coef_ as initialization; otherwise
        # start the coordinate descent from zeros.
        if not self.warm_start or not hasattr(self, "coef_"):
            coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
        self.n_iter_ = []
        # Solve one single-alpha path per target.
        # NOTE(review): xrange presumably comes from six.moves at module
        # level (Python 2/3 compatibility) — confirm against file header.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # Collapse single-target results back to scalar/1-D shapes.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_offset, y_offset, X_scale)
        # workaround since _set_intercept will cast self.coef_ into X.dtype
        self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted ``coef_`` """
        return sparse.csr_matrix(self.coef_)

    def _decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # safe_sparse_dot handles the sparse X case; otherwise defer to the
        # dense implementation in LinearModel.
        if sparse.isspmatrix(X):
            return safe_sparse_dot(X, self.coef_.T,
                                   dense_output=True) + self.intercept_
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
        Given this, you should use the :class:`LinearRegression` object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | array-like, default=False
        Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve
        sparsity.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Same path function as ElasticNet; l1_ratio=1.0 below makes it a
    # pure-L1 (lasso) path.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Lasso is ElasticNet with the mixing parameter pinned at
        # l1_ratio=1.0, i.e. no L2 term in the penalty.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values

    train : list of indices
        The indices of the train set

    test : list of indices
        The indices of the test set

    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature

    path_params : dictionary
        Parameters passed to the path function

    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'

    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2

    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies

    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies

    Returns
    -------
    this_mses : array, shape (n_alphas,)
        Mean squared error of the fitted models on the test fold,
        one value per alpha on the path (averaged over targets).
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Preprocess the training fold here so the path function can be called
    # with fit_intercept=False / normalize=False (see keys set below).
    X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_offset'] = X_offset
    path_params['X_scale'] = X_scale
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_offset = np.atleast_1d(y_offset)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the feature scaling on the coefficients so predictions are
        # made in the original (unscaled) feature space.
        nonzeros = np.flatnonzero(X_scale)
        coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
    # Recover the intercepts removed by the centering in _pre_fit.
    intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues has shape (n_test_samples, n_targets, n_alphas); averaging
    # over the first two axes gives one MSE per alpha.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""

    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Abstract: concrete subclasses (LassoCV, ElasticNetCV, ...) call
        # this with their own defaults. Parameters are stored unmodified.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : object
            The fitted estimator.
        """
        y = check_array(y, copy=False, dtype=[np.float64, np.float32],
                        ensure_2d=False)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # Pick the model class to refit at the end: mono-output CV classes
        # use ElasticNet/Lasso; otherwise the multi-task variants.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=[np.float64, np.float32],
                            order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatically-computed alpha grid per l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv.split(X))
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=X.dtype.type)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # Unfold back to (n_l1_ratio, n_folds, n_alphas), then average
        # over folds to score each (l1_ratio, alpha) pair.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        # precompute was used only for path computation; the final refit
        # always runs without a Gram matrix.
        model.precompute = False
        model.fit(X, y)
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_model_selection.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # Path function used by LinearModelCV.fit to compute the
    # coordinate-descent regularization path on each CV fold.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # All hyper-parameters are simply stored by the LinearModelCV base
        # class; the fitting logic lives in LinearModelCV.fit.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float or array of floats, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),

    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_model_selection.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    # Path function used by LinearModelCV.fit to compute the
    # coordinate-descent regularization path on each CV fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Per scikit-learn convention, __init__ only stores the constructor
        # arguments unmodified; validation and fitting happen in
        # LinearModelCV.fit.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_elastic_net>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524 0.45612256]
     [ 0.45663524 0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422 0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # Per scikit-learn convention, __init__ only stores the constructor
        # arguments; all work happens in fit.
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # The Cython coordinate-descent solver requires Fortran-ordered X;
        # only copy when the centering below would otherwise mutate the
        # caller's array.
        X = check_array(X, dtype=[np.float64, np.float32], order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = check_array(y, dtype=X.dtype.type, ensure_2d=False)

        # MultiTaskLasso inherits this fit; it has no l1_ratio constructor
        # parameter but sets the attribute to 1.0, so this hasattr test
        # distinguishes the two for the error message below.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)

        n_samples, n_features = X.shape
        _, n_tasks = y.shape

        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))

        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, self.fit_intercept, self.normalize, copy=False)

        # Bug fix: on a never-fitted estimator with warm_start=True,
        # ``self.coef_`` does not exist yet, so reading it directly raised
        # AttributeError; getattr with a None default handles the first fit.
        if not self.warm_start or getattr(self, 'coef_', None) is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
                                  order='F')

        # The Cython solver works with the penalty scaled by n_samples.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples

        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory

        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')

        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)

        self._set_intercept(X_offset, y_offset, X_scale)

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations',
                          ConvergenceWarning)

        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    r"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    intercept_ : array, shape (n_tasks,)
        independent term in decision function.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398 0. ]
     [ 0.89393398 0. ]]
    >>> print(clf.intercept_)
    [ 0.10606602 0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Fixing l1_ratio at 1.0 turns the inherited MultiTaskElasticNet.fit
        # into a pure L1/L2 (group-lasso style) penalty; it is not a
        # constructor parameter, so get_params does not expose it.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds) or \
            (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio

    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032 0.46958558]
     [ 0.52875032 0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409 0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Path function used by LinearModelCV.fit to compute the
    # coordinate-descent regularization path on each CV fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Per scikit-learn convention, __init__ only stores the constructor
        # arguments unmodified; validation and fitting happen in
        # LinearModelCV.fit.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.

    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
        Note that ``coef_`` stores the transpose of ``W``, ``W.T``.

    alpha_ : float
        The amount of penalization chosen by cross validation

    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Path function used by LinearModelCV.fit to compute the
    # coordinate-descent regularization path on each CV fold.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # All hyper-parameters are stored by the LinearModelCV base class;
        # the fitting logic lives in LinearModelCV.fit.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/linear_model/logistic.py | 3 | 55888 | """
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
    """Compute ``y * (X.dot(coef) + intercept)``.

    Takes into consideration whether an intercept term is appended to
    the coefficient vector.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector; when it has one extra entry, the last entry
        is the intercept.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    Returns
    -------
    coef : ndarray, shape (n_features,)
        ``w`` with any intercept entry stripped off.

    intercept : float
        The intercept, or 0. when ``w`` carries none.

    yz : ndarray, shape (n_samples,)
        Elementwise product ``y * (X.dot(coef) + intercept)``.
    """
    n_features = X.shape[1]
    if w.size == n_features + 1:
        # The trailing coefficient plays the role of the intercept.
        coef, intercept = w[:-1], w[-1]
    else:
        coef, intercept = w, 0.
    margin = safe_sparse_dot(X, coef) + intercept
    return coef, intercept, y * margin
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
    """Computes the logistic loss and gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.

    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    """
    n_features = X.shape[1]
    grad = np.empty_like(w)
    # Intercept handling is delegated; with an intercept, the input ``w``
    # has one more entry than there are features.
    fit_intercept = grad.shape[0] > n_features
    w, c, yz = _intercept_dot(w, X, y)

    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Loss = weighted negative log-likelihood + L2 penalty on the
    # coefficients (the intercept is not penalized).
    loss = -np.sum(sample_weight * log_logistic(yz))
    loss += .5 * alpha * np.dot(w, w)

    z0 = sample_weight * (expit(yz) - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w

    if fit_intercept:
        # Gradient with respect to the intercept term.
        grad[-1] = z0.sum()
    return loss, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
    """Compute the logistic loss.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with the intercept appended.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    """
    w, c, yz = _intercept_dot(w, X, y)

    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Weighted negative log-likelihood of the logistic model ...
    data_loss = -np.sum(sample_weight * log_logistic(yz))
    # ... plus the L2 penalty on the coefficients (intercept excluded).
    penalty = .5 * alpha * np.dot(w, w)
    return data_loss + penalty
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
    """Computes the logistic loss, gradient and the Hessian.

    Rather than materializing the (n_features x n_features) Hessian, this
    returns a closure that computes Hessian-vector products on demand, as
    required by the newton-cg solver.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    Hs : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient.
    """
    n_samples, n_features = X.shape
    # Allocate before _intercept_dot shrinks w: grad must keep the
    # intercept slot when one is being fit.
    grad = np.empty_like(w)
    # One extra entry in w means the intercept is being fit.
    fit_intercept = grad.shape[0] > n_features
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])
    # Logistic loss is the negative of the log of the logistic function.
    out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
    z = expit(yz)
    # Per-sample derivative of the loss with respect to the margin.
    z0 = sample_weight * (z - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    # Case where we fit the intercept.
    if fit_intercept:
        grad[-1] = z0.sum()
    # The mat-vec product of the Hessian.  d holds the diagonal weights
    # sigma * (1 - sigma) of the logistic Hessian, per sample.
    d = sample_weight * z * (1 - z)
    if sparse.issparse(X):
        # Scale the rows of X by d via a sparse diagonal matrix, keeping
        # the product sparse.
        dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
                             shape=(n_samples, n_samples)), X)
    else:
        # Precompute as much as possible
        dX = d[:, np.newaxis] * X
    if fit_intercept:
        # Calculate the double derivative with respect to intercept
        # In the case of sparse matrices this returns a matrix object.
        dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))

    def Hs(s):
        # Hessian-vector product: H @ s = X.T @ (d * (X @ s)) + alpha * s,
        # with extra intercept row/column terms when fit_intercept is True.
        ret = np.empty_like(s)
        ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
        ret[:n_features] += alpha * s[:n_features]

        # For the fit intercept case.
        if fit_intercept:
            ret[:n_features] += s[-1] * dd_intercept
            ret[-1] = dd_intercept.dot(s[:n_features])
            ret[-1] += d.sum() * s[-1]
        return ret

    return out, grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
    """Compute the multinomial loss and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    loss : float
        Multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities.
    w : ndarray, shape (n_classes, n_features)
        Reshaped param vector excluding intercept terms.
    """
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    # One extra column per class means the intercepts are being fit.
    fit_intercept = (w.size == n_classes * (n_features + 1))
    w = w.reshape(n_classes, -1)
    sample_weight = sample_weight[:, np.newaxis]
    if fit_intercept:
        intercept = w[:, -1]
        w = w[:, :-1]
    else:
        intercept = 0
    # Log-probabilities, normalized with the log-sum-exp trick for
    # numerical stability.
    p = safe_sparse_dot(X, w.T)
    p += intercept
    p -= logsumexp(p, axis=1)[:, np.newaxis]
    # Weighted cross-entropy plus L2 penalty (intercepts excluded).
    loss = 0.5 * alpha * squared_norm(w) - (sample_weight * Y * p).sum()
    # Exponentiate in place to turn log-probabilities into probabilities.
    p = np.exp(p, p)
    return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
    """Compute the multinomial loss, gradient and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    grad : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities
    """
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    fit_intercept = (w.size == n_classes * (n_features + 1))
    n_cols = n_features + 1 if fit_intercept else n_features
    grad = np.zeros((n_classes, n_cols))

    # Loss and probabilities come from the sibling helper; it also hands
    # back w reshaped to (n_classes, n_features) without intercepts.
    loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)

    residual = sample_weight[:, np.newaxis] * (p - Y)
    grad[:, :n_features] = safe_sparse_dot(residual.T, X) + alpha * w
    if fit_intercept:
        grad[:, -1] = residual.sum(axis=0)
    return loss, grad.ravel(), p
def _multinomial_loss_grad_hess(w, X, Y, alpha, sample_weight):
    """
    Provides multinomial loss, gradient, and a function for computing hessian
    vector product.

    The Hessian is never materialized; ``hessp`` applies it to a vector via
    the R-operator (Pearlmutter's trick), as required by newton-cg.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : ndarray, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    grad : array, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    hessp : callable
        Function that takes in a vector input of shape (n_classes * n_features)
        or (n_classes * (n_features + 1)) and returns matrix-vector product
        with hessian.

    References
    ----------
    Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
    http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
    """
    n_features = X.shape[1]
    n_classes = Y.shape[1]
    # One extra column per class means the intercepts are being fit.
    fit_intercept = w.size == (n_classes * (n_features + 1))
    # p (class probabilities) is captured by the closure below.
    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
    sample_weight = sample_weight[:, np.newaxis]

    # Hessian-vector product derived by applying the R-operator on the gradient
    # of the multinomial loss function.
    def hessp(v):
        v = v.reshape(n_classes, -1)
        if fit_intercept:
            inter_terms = v[:, -1]
            v = v[:, :-1]
        else:
            inter_terms = 0
        # r_yhat holds the result of applying the R-operator on the multinomial
        # estimator.
        r_yhat = safe_sparse_dot(X, v.T)
        r_yhat += inter_terms
        r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
        r_yhat *= p
        r_yhat *= sample_weight
        hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
        hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
        # The L2 penalty contributes alpha * v to the product (intercepts
        # are not regularized).
        hessProd[:, :n_features] += v * alpha
        if fit_intercept:
            hessProd[:, -1] = r_yhat.sum(axis=0)
        return hessProd.ravel()

    return loss, grad, hessp
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None, copy=True,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='ovr'):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,)
        Input data, target values.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.

    copy : bool, default True
        Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases, when logistic_regression_path
        is called repeatedly with the same data, as y is modified
        along the path.

    class_weight : {dict, 'auto'}, optional
        Over-/undersamples the samples of each class according to the given
        weights. If not given, all classes are supposed to have weight one.
        The 'auto' mode selects weights inversely proportional to class
        frequencies in the training set.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        solver.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Cs : ndarray
        Grid of Cs used for cross-validation.

    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    """
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    # -- Parameter validation ------------------------------------------------
    if multi_class not in ['multinomial', 'ovr']:
        # Bug fix: the two adjacent string literals previously concatenated
        # without a separator ("...'ovr'got ...").
        raise ValueError("multi_class can be either 'multinomial' or 'ovr', "
                         "got %s" % multi_class)

    if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
        raise ValueError("Logistic Regression supports only liblinear,"
                         " newton-cg and lbfgs solvers. got %s" % solver)

    if multi_class == 'multinomial' and solver == 'liblinear':
        raise ValueError("Solver %s cannot solve problems with "
                         "a multinomial backend." % solver)

    if solver != 'liblinear':
        if penalty != 'l2':
            raise ValueError("newton-cg and lbfgs solvers support only "
                             "l2 penalties, got %s penalty." % penalty)
        if dual:
            raise ValueError("newton-cg and lbfgs solvers support only "
                             "dual=False, got dual=%s" % dual)

    # Preprocessing.
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
    _, n_features = X.shape
    check_consistent_length(X, y)
    classes = np.unique(y)

    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with a OvR.
    sample_weight = np.ones(X.shape[0])
    le = LabelEncoder()

    if isinstance(class_weight, dict):
        if solver == "liblinear":
            if classes.size == 2:
                # Reconstruct the weights with keys 1 and -1
                temp = {1: class_weight[pos_class],
                        -1: class_weight[classes[0]]}
                class_weight = temp.copy()
            else:
                raise ValueError("In LogisticRegressionCV the liblinear "
                                 "solver cannot handle multiclass with "
                                 "class_weight of type dict. Use the lbfgs, "
                                 "newton-cg solvers or set "
                                 "class_weight='auto'")
        else:
            class_weight_ = compute_class_weight(class_weight, classes, y)
            sample_weight = class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept))
        mask_classes = [-1, 1]
        mask = (y == pos_class)
        y[mask] = 1
        y[~mask] = -1
        # To take care of object dtypes, i.e 1 and -1 are in the form of
        # strings.
        y = as_float_array(y, copy=False)

    else:
        lbin = LabelBinarizer()
        Y_bin = lbin.fit_transform(y)
        if Y_bin.shape[1] == 1:
            # Binary problem: LabelBinarizer returns a single column, but
            # the multinomial loss expects one column per class.
            Y_bin = np.hstack([1 - Y_bin, Y_bin])
        w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
                      order='F')
        mask_classes = classes

    if class_weight == "auto":
        class_weight_ = compute_class_weight(class_weight, mask_classes, y)
        sample_weight = class_weight_[le.fit_transform(y)]

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_vectors = classes.size
            if n_vectors == 2:
                n_vectors = 1

            if (coef.shape[0] != n_vectors or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            w0[:, :coef.shape[1]] = coef

    # Select the objective/gradient/Hessian callables for the solver.
    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        w0 = w0.ravel()
        target = Y_bin
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_loss_grad_hess
    else:
        target = y
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_loss_grad_hess

    coefs = list()
    for C in Cs:
        # w0 carries the previous solution forward (warm start) for the
        # lbfgs and newton-cg solvers.
        if solver == 'lbfgs':
            try:
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
            except TypeError:
                # old scipy doesn't have maxiter
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol)
            if info["warnflag"] == 1 and verbose > 0:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.")
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
                           tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, _, = _fit_liblinear(
                X, y, C, fit_intercept, intercept_scaling, class_weight,
                penalty, dual, verbose, max_iter, tol)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            multi_w0 = np.reshape(w0, (classes.size, -1))
            if classes.size == 2:
                # Binary case: keep only the positive class' coefficients.
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0)
        else:
            coefs.append(w0)
    return coefs, np.array(Cs)
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
                          scoring=None, fit_intercept=False,
                          max_iter=100, tol=1e-4, class_weight=None,
                          verbose=0, solver='lbfgs', penalty='l2',
                          dual=False, copy=True, intercept_scaling=1.,
                          multi_class='ovr'):
    """Computes scores across logistic_regression_path

    Fits the regularization path on the ``train`` indices, then scores
    every model along the path on the ``test`` indices.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target labels.

    train : list of indices
        The indices of the train set.

    test : list of indices
        The indices of the test set.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : list of floats | int
        Each of the values in Cs describes the inverse of
        regularization strength. If Cs is as an int, then a grid of Cs
        values are chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs are used.

    scoring : callable
        For a list of scoring functions that can be used, look at
        :mod:`sklearn.metrics`. The default scoring option used is
        accuracy_score.

    fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        term of each coef_ gives us the intercept.

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Tolerance for stopping criteria.

    class_weight : {dict, 'auto'}, optional
        Over-/undersamples the samples of each class according to the given
        weights. If not given, all classes are supposed to have weight one.
        The 'auto' mode selects weights inversely proportional to class
        frequencies in the training set.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear'}
        Decides which solver to use.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        solver.

    copy : bool, default True
        Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases, when ``_log_reg_scoring_path`` is called
        repeatedly with the same data, as y is modified along the path.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Cs : ndarray
        Grid of Cs used for cross-validation.

    scores : ndarray, shape (n_cs,)
        Scores obtained for each Cs.
    """
    log_reg = LogisticRegression(fit_intercept=fit_intercept)

    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]

    # The score method of Logistic Regression has a classes_ attribute.
    if multi_class == 'ovr':
        log_reg.classes_ = np.array([-1, 1])
    elif multi_class == 'multinomial':
        log_reg.classes_ = np.unique(y_train)
    else:
        # Bug fix: multi_class is a string, so the old '%d' conversion
        # raised a TypeError instead of this ValueError.
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)

    if pos_class is not None:
        # Mask the test labels into the +/-1 encoding used by the OvR fit.
        mask = (y_test == pos_class)
        y_test[mask] = 1
        y_test[~mask] = -1
        # To deal with object dtypes, we need to convert into an array of
        # floats.
        y_test = as_float_array(y_test, copy=False)

    coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
                                         fit_intercept=fit_intercept,
                                         solver=solver,
                                         max_iter=max_iter,
                                         class_weight=class_weight,
                                         copy=copy, pos_class=pos_class,
                                         multi_class=multi_class,
                                         tol=tol, verbose=verbose,
                                         dual=dual, penalty=penalty,
                                         intercept_scaling=intercept_scaling)

    scores = list()

    if isinstance(scoring, six.string_types):
        scoring = SCORERS[scoring]
    for w in coefs:
        if multi_class == 'ovr':
            w = w[np.newaxis, :]
        if fit_intercept:
            log_reg.coef_ = w[:, :-1]
            log_reg.intercept_ = w[:, -1]
        else:
            log_reg.coef_ = w
            log_reg.intercept_ = 0.
        if scoring is None:
            # Default to the estimator's own accuracy score.
            scores.append(log_reg.score(X_test, y_test))
        else:
            scores.append(scoring(log_reg, X_test, y_test))
    return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
                         _LearntSelectorMixin, SparseCoefMixin):
    """Logistic Regression (aka logit, MaxEnt) classifier.

    In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
    scheme if the 'multi_class' option is set to 'ovr' and uses the
    cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
    (Currently the 'multinomial' option is supported only by the 'lbfgs' and
    'newton-cg' solvers.)

    This class implements regularized logistic regression using the
    `liblinear` library, newton-cg and lbfgs solvers. It can handle both
    dense and sparse input. Use C-ordered arrays or CSR matrices containing
    64-bit floats for optimal performance; any other input format will be
    converted (and copied).

    The newton-cg and lbfgs solvers support only L2 regularization with primal
    formulation. The liblinear solver supports both L1 and L2 regularization,
    with a dual formulation only for the L2 penalty.

    Parameters
    ----------
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify stronger
        regularization.

    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added the decision function.

    intercept_scaling : float, default: 1
        Useful only if solver is liblinear.
        when self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'auto'}, optional
        Over-/undersamples the samples of each class according to the given
        weights. If not given, all classes are supposed to have weight one.
        The 'auto' mode selects weights inversely proportional to class
        frequencies in the training set.

    max_iter : int
        Useful only for the newton-cg and lbfgs solvers. Maximum number of
        iterations taken for the solvers to converge.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    solver : {'newton-cg', 'lbfgs', 'liblinear'}
        Algorithm to use in the optimization problem.

    tol : float, optional
        Tolerance for stopping criteria.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        solver.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    Attributes
    ----------
    coef_ : array, shape (n_classes, n_features)
        Coefficient of the features in the decision function.

    intercept_ : array, shape (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero.

    n_iter_ : int
        Maximum of the actual number of iterations across all classes.
        Valid only for the liblinear solver.

    See also
    --------
    SGDClassifier : incrementally trained logistic regression (when given
        the parameter ``loss="log"``).
    sklearn.svm.LinearSVC : learns SVM models using the same algorithm.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    References
    ----------
    LIBLINEAR -- A Library for Large Linear Classification
        http://www.csie.ntu.edu.tw/~cjlin/liblinear/

    Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
        methods for logistic regression and maximum entropy models.
        Machine Learning 85(1-2):41-75.
        http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf

    See also
    --------
    sklearn.linear_model.SGDClassifier
    """

    def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1, class_weight=None,
                 random_state=None, solver='liblinear', max_iter=100,
                 multi_class='ovr', verbose=0):
        self.penalty = penalty
        self.dual = dual
        self.tol = tol
        self.C = C
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.max_iter = max_iter
        self.multi_class = multi_class
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target vector relative to X.

        Returns
        -------
        self : object
            Returns self.
        """
        # C must be strictly positive: the solvers use 1. / C as the
        # regularization strength, so C == 0 would divide by zero.  The
        # old check (self.C < 0) silently let C == 0 through.
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C")
        self.classes_ = np.unique(y)
        if self.solver not in ['liblinear', 'newton-cg', 'lbfgs']:
            raise ValueError(
                "Logistic Regression supports only liblinear, newton-cg and "
                "lbfgs solvers, Got solver=%s" % self.solver)

        if self.solver == 'liblinear' and self.multi_class == 'multinomial':
            raise ValueError("Solver %s does not support a multinomial "
                             "backend." % self.solver)

        if self.multi_class not in ['ovr', 'multinomial']:
            raise ValueError("multi_class should be either ovr or multinomial "
                             "got %s" % self.multi_class)

        if self.solver == 'liblinear':
            # liblinear handles the whole fit internally, including the
            # intercept and multiclass OvR.
            self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
                X, y, self.C, self.fit_intercept, self.intercept_scaling,
                self.class_weight, self.penalty, self.dual, self.verbose,
                self.max_iter, self.tol)
            return self

        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])

        if len(self.classes_) == 2:
            # Binary: only the positive class needs a coefficient vector.
            n_classes = 1
            classes_ = classes_[1:]

        self.coef_ = list()
        self.intercept_ = np.zeros(n_classes)

        # Hack so that we iterate only once for the multinomial case.
        if self.multi_class == 'multinomial':
            classes_ = [None]

        for ind, class_ in enumerate(classes_):
            coef_, _ = logistic_regression_path(
                X, y, pos_class=class_, Cs=[self.C],
                fit_intercept=self.fit_intercept, tol=self.tol,
                verbose=self.verbose, solver=self.solver,
                multi_class=self.multi_class, max_iter=self.max_iter,
                class_weight=self.class_weight)
            self.coef_.append(coef_[0])

        self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this get squeezed to a 1-D array.
        if self.coef_.ndim == 1:
            self.coef_ = self.coef_[np.newaxis, :]
        self.coef_ = np.asarray(self.coef_)

        if self.fit_intercept:
            # The path appends the intercept as the last column.
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]

        return self

    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        return self._predict_proba_lr(X)

    def predict_log_proba(self, X):
        """Log of probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in ``self.classes_``.
        """
        return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
                           LinearClassifierMixin, _LearntSelectorMixin):
    """Logistic Regression CV (aka logit, MaxEnt) classifier.

    This class implements logistic regression using liblinear, newton-cg or
    LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
    regularization with primal formulation. The liblinear solver supports both
    L1 and L2 regularization, with a dual formulation only for the L2 penalty.

    For the grid of Cs values (that are set by default to be ten values in
    a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
    selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path i.e guess the initial coefficients of the
    present fit to be the coefficients got after convergence in the previous
    fit, so in general it is supposed to be faster.

    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores got by doing a one-vs-rest in parallel across all
    folds and classes. Hence this is not the true multinomial loss.

    Parameters
    ----------
    Cs : list of floats | int
        Each of the values in Cs describes the inverse of regularization
        strength. If Cs is as an int, then a grid of Cs values are chosen
        in a logarithmic scale between 1e-4 and 1e4.
        Like in support vector machines, smaller values specify stronger
        regularization.

    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added the decision function.

    class_weight : {dict, 'auto'}, optional
        Over-/undersamples the samples of each class according to the given
        weights. If not given, all classes are supposed to have weight one.
        The 'auto' mode selects weights inversely proportional to class
        frequencies in the training set.

    cv : integer or cross-validation generator
        The default cross-validation generator used is Stratified K-Folds.
        If an integer is provided, then it is the number of folds used.
        See the module :mod:`sklearn.cross_validation` module for the
        list of possible cross-validation objects.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    scoring : callable
        Scoring function to use as cross-validation criteria. For a list of
        scoring functions that can be used, look at :mod:`sklearn.metrics`.
        The default scoring option used is accuracy_score.

    solver : {'newton-cg', 'lbfgs', 'liblinear'}
        Algorithm to use in the optimization problem.

    tol : float, optional
        Tolerance for stopping criteria.

    max_iter : int, optional
        Maximum number of iterations of the optimization algorithm.

    n_jobs : int, optional
        Number of CPU cores used during the cross-validation loop. If given
        a value of -1, all cores are used.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    refit : bool
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that corresponds to the best score is taken, and a
        final refit is done using these parameters.
        Otherwise the coefs, intercepts and C that correspond to the
        best scores across folds are averaged.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        solver.

    intercept_scaling : float, default 1.
        Useful only if solver is liblinear.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    Attributes
    ----------
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem
        is binary.
        `coef_` is readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        It is available only when parameter intercept is set to True
        and is of shape(1,) when the problem is binary.

    Cs_ : array
        Array of C i.e. inverse of regularization parameter values used
        for cross-validation.

    coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or
                   ``(n_folds, len(Cs_), n_features + 1)``
        dict with classes as the keys, and the path of coefficients obtained
        during cross-validating across each fold and then across each Cs
        after doing an OvR for the corresponding class as values.
        If the 'multi_class' option is set to 'multinomial', then
        the coefs_paths are the coefficients corresponding to each class.
        Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
        ``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
        intercept is fit or not.

    scores_ : dict
        dict with classes as the keys, and the values as the
        grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since this is the multinomial class.
        Each dict value has shape (n_folds, len(Cs))

    C_ : array, shape (n_classes,) or (n_classes - 1,)
        Array of C that maps to the best scores across every class. If refit is
        set to False, then for each class, the best C is the average of the
        C's that correspond to the best scores for each fold.

    See also
    --------
    LogisticRegression
    """

    def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
                 penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
                 max_iter=100, class_weight=None, n_jobs=1, verbose=0,
                 refit=True, intercept_scaling=1., multi_class='ovr'):
        # Store constructor parameters untouched; validation happens in fit,
        # per scikit-learn estimator convention.
        self.Cs = Cs
        self.fit_intercept = fit_intercept
        self.cv = cv
        self.dual = dual
        self.penalty = penalty
        self.scoring = scoring
        self.tol = tol
        self.max_iter = max_iter
        self.class_weight = class_weight
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.solver = solver
        self.refit = refit
        self.intercept_scaling = intercept_scaling
        self.multi_class = multi_class

    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target vector relative to X.

        Returns
        -------
        self : object
            Returns self.
        """
        # Solver/penalty compatibility: only liblinear handles l1 and the
        # dual formulation.
        if self.solver != 'liblinear':
            if self.penalty != 'l2':
                raise ValueError("newton-cg and lbfgs solvers support only "
                                 "l2 penalties.")
            if self.dual:
                raise ValueError("newton-cg and lbfgs solvers support only "
                                 "the primal form.")

        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        y = check_array(y, ensure_2d=False, dtype=None)

        if self.multi_class not in ['ovr', 'multinomial']:
            raise ValueError("multi_class backend should be either "
                             "'ovr' or 'multinomial'"
                             " got %s" % self.multi_class)

        # Flatten a (n_samples, 1) column vector to 1d, warning the user.
        if y.ndim == 2 and y.shape[1] == 1:
            warnings.warn(
                "A column-vector y was passed when a 1d array was"
                " expected. Please change the shape of y to "
                "(n_samples, ), for example using ravel().",
                DataConversionWarning
            )
            y = np.ravel(y)

        check_consistent_length(X, y)

        # init cross-validation generator
        cv = _check_cv(self.cv, X, y, classifier=True)
        folds = list(cv)

        self._enc = LabelEncoder()
        self._enc.fit(y)

        labels = self.classes_ = np.unique(y)
        n_classes = len(labels)

        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % self.classes_[0])
        if n_classes == 2:
            # OvR in case of binary problems is as good as fitting
            # the higher label
            n_classes = 1
            labels = labels[1:]

        # We need this hack to iterate only once over labels, in the case of
        # multi_class = multinomial, without changing the value of the labels.
        iter_labels = labels
        if self.multi_class == 'multinomial':
            iter_labels = [None]

        if self.class_weight and not(isinstance(self.class_weight, dict) or
                                     self.class_weight == 'auto'):
            raise ValueError("class_weight provided should be a "
                             "dict or 'auto'")

        # One regularization path is computed per (label, fold) pair,
        # potentially in parallel.
        path_func = delayed(_log_reg_scoring_path)

        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
                      fit_intercept=self.fit_intercept, penalty=self.penalty,
                      dual=self.dual, solver=self.solver, tol=self.tol,
                      max_iter=self.max_iter, verbose=self.verbose,
                      class_weight=self.class_weight, scoring=self.scoring,
                      multi_class=self.multi_class,
                      intercept_scaling=self.intercept_scaling
                      )
            for label in iter_labels
            for train, test in folds)

        if self.multi_class == 'multinomial':
            multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
            multi_coefs_paths = np.asarray(multi_coefs_paths)
            multi_scores = np.asarray(multi_scores)

            # This is just to maintain API similarity between the ovr and
            # multinomial option.
            # Coefs_paths in now n_folds X len(Cs) X n_classes X n_features
            # we need it to be n_classes X len(Cs) X n_folds X n_features
            # to be similar to "ovr".
            coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)

            # Multinomial has a true score across all labels. Hence the
            # shape is n_folds X len(Cs). We need to repeat this score
            # across all labels for API similarity.
            scores = np.tile(multi_scores, (n_classes, 1, 1))
            self.Cs_ = Cs[0]
        else:
            coefs_paths, Cs, scores = zip(*fold_coefs_)
            self.Cs_ = Cs[0]
            coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
                                                   len(self.Cs_), -1))

        self.coefs_paths_ = dict(zip(labels, coefs_paths))
        scores = np.reshape(scores, (n_classes, len(folds), -1))
        self.scores_ = dict(zip(labels, scores))

        self.C_ = list()
        self.coef_ = np.empty((n_classes, X.shape[1]))
        self.intercept_ = np.zeros(n_classes)

        # hack to iterate only once for multinomial case.
        if self.multi_class == 'multinomial':
            scores = multi_scores
            coefs_paths = multi_coefs_paths

        for index, label in enumerate(iter_labels):
            if self.multi_class == 'ovr':
                scores = self.scores_[label]
                coefs_paths = self.coefs_paths_[label]

            if self.refit:
                # Pick the C with the best score summed across folds and
                # refit once on the full data, warm-started from the mean
                # of the per-fold coefficients at that C.
                best_index = scores.sum(axis=0).argmax()

                C_ = self.Cs_[best_index]
                self.C_.append(C_)
                if self.multi_class == 'multinomial':
                    coef_init = np.mean(coefs_paths[:, best_index, :, :],
                                        axis=0)
                else:
                    coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)

                w, _ = logistic_regression_path(
                    X, y, pos_class=label, Cs=[C_], solver=self.solver,
                    fit_intercept=self.fit_intercept, coef=coef_init,
                    max_iter=self.max_iter, tol=self.tol,
                    class_weight=self.class_weight,
                    multi_class=self.multi_class,
                    verbose=max(0, self.verbose - 1))
                w = w[0]

            else:
                # Take the best scores across every fold and the average of all
                # coefficients corresponding to the best scores.
                best_indices = np.argmax(scores, axis=1)
                w = np.mean([
                    coefs_paths[i][best_indices[i]]
                    for i in range(len(folds))
                ], axis=0)
                self.C_.append(np.mean(self.Cs_[best_indices]))

            if self.multi_class == 'multinomial':
                self.C_ = np.tile(self.C_, n_classes)
                self.coef_ = w[:, :X.shape[1]]
                if self.fit_intercept:
                    self.intercept_ = w[:, -1]
            else:
                self.coef_[index] = w[: X.shape[1]]
                if self.fit_intercept:
                    self.intercept_[index] = w[-1]

        self.C_ = np.asarray(self.C_)
        return self
| bsd-3-clause |
q1ang/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
# Double-precision machine epsilon; the default ``nugget`` regularization
# below is a small multiple of this value for numerical robustness.
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    # Number of unordered pairs (i, j), i < j.
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # BUGFIX: ``np.int`` is a deprecated alias removed in NumPy >= 1.24;
    # the builtin ``int`` is the exact equivalent (platform default integer).
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    # Fill one "row block" per anchor sample k: pairs (k, k+1) .. (k, n-1).
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])

    return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
    def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
                 storage_mode='full', verbose=False, theta0=1e-1,
                 thetaL=None, thetaU=None, optimizer='fmin_cobyla',
                 random_start=1, normalize=True,
                 nugget=10. * MACHINE_EPSILON, random_state=None):
        # Store constructor parameters untouched; validation is deferred to
        # ``_check_params`` (called from ``fit``), per scikit-learn convention.
        self.regr = regr
        self.corr = corr
        self.beta0 = beta0
        self.storage_mode = storage_mode
        self.verbose = verbose
        self.theta0 = theta0
        self.thetaL = thetaL
        self.thetaU = thetaU
        self.normalize = normalize
        self.nugget = nugget
        self.optimizer = optimizer
        self.random_start = random_start
        self.random_state = random_state
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """
        # Run input checks
        self._check_params()

        self.random_state = check_random_state(self.random_state)

        # Force data to 2D numpy.array
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        # Remember the original dimensionality of y so predict() can return
        # a matching shape.
        self.y_ndim_ = y.ndim
        if y.ndim == 1:
            y = y[:, np.newaxis]

        # Check shapes of DOE & observations
        n_samples, n_features = X.shape
        _, n_targets = y.shape

        # Run input checks (again, now that n_samples is known)
        self._check_params(n_samples)

        # Normalize data or don't
        if self.normalize:
            X_mean = np.mean(X, axis=0)
            X_std = np.std(X, axis=0)
            y_mean = np.mean(y, axis=0)
            y_std = np.std(y, axis=0)
            # Avoid division by zero for constant columns
            X_std[X_std == 0.] = 1.
            y_std[y_std == 0.] = 1.
            # center and scale X if necessary
            X = (X - X_mean) / X_std
            y = (y - y_mean) / y_std
        else:
            # Identity normalization (no-op) kept so downstream code can
            # always apply (x - mean) / std uniformly.
            X_mean = np.zeros(1)
            X_std = np.ones(1)
            y_mean = np.zeros(1)
            y_std = np.ones(1)

        # Calculate matrix of distances D between samples
        D, ij = l1_cross_distances(X)
        # Duplicate inputs make the correlation matrix singular unless the
        # pure-nugget correlation model is used.
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            raise Exception("Multiple input features cannot have the same"
                            " target value.")

        # Regression matrix and parameters
        F = self.regr(X)
        n_samples_F = F.shape[0]
        if F.ndim > 1:
            p = F.shape[1]
        else:
            p = 1
        if n_samples_F != n_samples:
            raise Exception("Number of rows in F and X do not match. Most "
                            "likely something is going wrong with the "
                            "regression model.")
        if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is undetermined "
                             "n_samples=%d must be greater than the "
                             "regression model size p=%d.") % (n_samples, p))
        if self.beta0 is not None:
            if self.beta0.shape[0] != p:
                raise Exception("Shapes of beta0 and F do not match.")

        # Set attributes
        self.X = X
        self.y = y
        self.D = D
        self.ij = ij
        self.F = F
        self.X_mean, self.X_std = X_mean, X_std
        self.y_mean, self.y_std = y_mean, y_std

        # Determine Gaussian Process model parameters
        if self.thetaL is not None and self.thetaU is not None:
            # Maximum Likelihood Estimation of the parameters
            if self.verbose:
                print("Performing Maximum Likelihood Estimation of the "
                      "autocorrelation parameters...")
            self.theta_, self.reduced_likelihood_function_value_, par = \
                self._arg_max_reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad parameter region. "
                                "Try increasing upper bound")
        else:
            # Given parameters
            if self.verbose:
                print("Given autocorrelation parameters. "
                      "Computing Gaussian Process model parameters...")
            self.theta_ = self.theta0
            self.reduced_likelihood_function_value_, par = \
                self.reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad point. Try increasing theta0.")

        # Cache the BLUP parameters produced by the likelihood evaluation.
        self.beta = par['beta']
        self.gamma = par['gamma']
        self.sigma2 = par['sigma2']
        self.C = par['C']
        self.Ft = par['Ft']
        self.G = par['G']

        if self.storage_mode == 'light':
            # Delete heavy data (it will be computed again if required)
            # (it is required only when MSE is wanted in self.predict)
            if self.verbose:
                print("Light storage mode specified. "
                      "Flushing autocorrelation matrix...")
            self.D = None
            self.ij = None
            self.F = None
            self.C = None
            self.Ft = None
            self.G = None

        return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
    """
    This function estimates the autocorrelation parameters theta as the
    maximizer of the reduced likelihood function.
    (Minimization of the opposite reduced likelihood function is used for
    convenience.)

    Parameters
    ----------
    self : All parameters are stored in the Gaussian Process model object.

    Returns
    -------
    optimal_theta : array_like
        The best set of autocorrelation parameters (the sought maximizer of
        the reduced likelihood function).
    optimal_reduced_likelihood_function_value : double
        The optimal reduced likelihood function value.
    optimal_par : dict
        The BLUP parameters associated to thetaOpt.
    """
    # Initialize output
    best_optimal_theta = []
    best_optimal_rlf_value = []
    best_optimal_par = []
    if self.verbose:
        print("The chosen optimizer is: " + str(self.optimizer))
        if self.random_start > 1:
            print(str(self.random_start) + " random starts are required.")
    percent_completed = 0.
    # Force optimizer to fmin_cobyla if the model is meant to be isotropic
    if self.optimizer == 'Welch' and self.theta0.size == 1:
        self.optimizer = 'fmin_cobyla'
    if self.optimizer == 'fmin_cobyla':
        # COBYLA minimizes, so negate the reduced likelihood; the search
        # runs in log10 space to keep theta strictly positive.
        def minus_reduced_likelihood_function(log10t):
            return - self.reduced_likelihood_function(
                theta=10. ** log10t)[0]
        # Box constraints thetaL <= theta <= thetaU expressed as
        # nonnegative constraint functions.  The ``i=i`` default binds
        # the loop index early (a late-binding closure would otherwise
        # make every lambda use the final i).
        constraints = []
        for i in range(self.theta0.size):
            constraints.append(lambda log10t, i=i:
                               log10t[i] - np.log10(self.thetaL[0, i]))
            constraints.append(lambda log10t, i=i:
                               np.log10(self.thetaU[0, i]) - log10t[i])
        for k in range(self.random_start):
            if k == 0:
                # Use specified starting point as first guess
                theta0 = self.theta0
            else:
                # Generate a random starting point log10-uniformly
                # distributed between bounds
                log10theta0 = (np.log10(self.thetaL)
                               + self.random_state.rand(*self.theta0.shape)
                               * np.log10(self.thetaU / self.thetaL))
                theta0 = 10. ** log10theta0
            # Run Cobyla
            try:
                log10_optimal_theta = \
                    optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                         np.log10(theta0).ravel(), constraints,
                                         iprint=0)
            except ValueError as ve:
                print("Optimization failed. Try increasing the ``nugget``")
                raise ve
            optimal_theta = 10. ** log10_optimal_theta
            optimal_rlf_value, optimal_par = \
                self.reduced_likelihood_function(theta=optimal_theta)
            # Compare the new optimizer to the best previous one
            if k > 0:
                if optimal_rlf_value > best_optimal_rlf_value:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
            else:
                # First restart: unconditionally record as current best.
                best_optimal_rlf_value = optimal_rlf_value
                best_optimal_par = optimal_par
                best_optimal_theta = optimal_theta
            if self.verbose and self.random_start > 1:
                # Progress reporting in 5% increments (20 buckets).
                if (20 * k) / self.random_start > percent_completed:
                    percent_completed = (20 * k) / self.random_start
                    print("%s completed" % (5 * percent_completed))
        optimal_rlf_value = best_optimal_rlf_value
        optimal_par = best_optimal_par
        optimal_theta = best_optimal_theta
    elif self.optimizer == 'Welch':
        # Componentwise optimization: fit the best isotropic model first,
        # then refine theta one dimension at a time while freezing the
        # others (recursing into the fmin_cobyla branch each time).
        # Backup of the given atrributes
        theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
        corr = self.corr
        verbose = self.verbose
        # This will iterate over fmin_cobyla optimizer
        self.optimizer = 'fmin_cobyla'
        self.verbose = False
        # Initialize under isotropy assumption
        if verbose:
            print("Initialize under isotropy assumption...")
        self.theta0 = check_array(self.theta0.min())
        self.thetaL = check_array(self.thetaL.min())
        self.thetaU = check_array(self.thetaU.max())
        theta_iso, optimal_rlf_value_iso, par_iso = \
            self._arg_max_reduced_likelihood_function()
        optimal_theta = theta_iso + np.zeros(theta0.shape)
        # Iterate over all dimensions of theta allowing for anisotropy
        if verbose:
            print("Now improving allowing for anisotropy...")
        for i in self.random_state.permutation(theta0.size):
            if verbose:
                print("Proceeding along dimension %d..." % (i + 1))
            self.theta0 = check_array(theta_iso)
            self.thetaL = check_array(thetaL[0, i])
            self.thetaU = check_array(thetaU[0, i])

            # Correlation restricted to dimension i: every other theta
            # component is frozen at its current optimal value.
            def corr_cut(t, d):
                return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                   t[0],
                                                   optimal_theta[0][(i +
                                                                     1)::]])),
                            d)
            self.corr = corr_cut
            optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                self._arg_max_reduced_likelihood_function()
        # Restore the given atrributes
        self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
        self.corr = corr
        self.optimizer = 'Welch'
        self.verbose = verbose
    else:
        raise NotImplementedError("This optimizer ('%s') is not "
                                  "implemented yet. Please contribute!"
                                  % self.optimizer)
    return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
    """Validate and canonicalize the estimator's public parameters.

    Resolves string names for ``regr``/``corr`` to callables, reshapes
    the theta parameters to 2-D, coerces flag types, and raises
    ``ValueError`` on any inconsistent setting.

    Parameters
    ----------
    n_samples : int, optional
        When given, additionally check that ``nugget`` is either a
        scalar or an array of length ``n_samples``.
    """
    # Check regression model
    if not callable(self.regr):
        if self.regr in self._regression_types:
            self.regr = self._regression_types[self.regr]
        else:
            raise ValueError("regr should be one of %s or callable, "
                             "%s was given."
                             % (self._regression_types.keys(), self.regr))
    # Check regression weights if given (Ordinary Kriging)
    if self.beta0 is not None:
        self.beta0 = np.atleast_2d(self.beta0)
        if self.beta0.shape[1] != 1:
            # Force to column vector
            self.beta0 = self.beta0.T
    # Check correlation model
    if not callable(self.corr):
        if self.corr in self._correlation_types:
            self.corr = self._correlation_types[self.corr]
        else:
            raise ValueError("corr should be one of %s or callable, "
                             "%s was given."
                             % (self._correlation_types.keys(), self.corr))
    # Check storage mode
    if self.storage_mode != 'full' and self.storage_mode != 'light':
        raise ValueError("Storage mode should either be 'full' or "
                         "'light', %s was given." % self.storage_mode)
    # Check correlation parameters
    self.theta0 = np.atleast_2d(self.theta0)
    lth = self.theta0.size
    if self.thetaL is not None and self.thetaU is not None:
        self.thetaL = np.atleast_2d(self.thetaL)
        self.thetaU = np.atleast_2d(self.thetaU)
        if self.thetaL.size != lth or self.thetaU.size != lth:
            raise ValueError("theta0, thetaL and thetaU must have the "
                             "same length.")
        if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
            # BUGFIX: the message previously read "O < thetaL" with a
            # letter O instead of the digit 0.
            raise ValueError("The bounds must satisfy 0 < thetaL <= "
                             "thetaU.")
    elif self.thetaL is None and self.thetaU is None:
        if np.any(self.theta0 <= 0):
            raise ValueError("theta0 must be strictly positive.")
    elif self.thetaL is None or self.thetaU is None:
        raise ValueError("thetaL and thetaU should either be both or "
                         "neither specified.")
    # Force verbose type to bool
    self.verbose = bool(self.verbose)
    # Force normalize type to bool
    self.normalize = bool(self.normalize)
    # Check nugget value
    self.nugget = np.asarray(self.nugget)
    # BUGFIX: was ``np.any(self.nugget) < 0.`` which compares a boolean
    # to 0. and therefore could never fire; the comparison must be
    # elementwise, inside np.any.
    if np.any(self.nugget < 0.):
        raise ValueError("nugget must be positive or zero.")
    if (n_samples is not None
            and self.nugget.shape not in [(), (n_samples,)]):
        raise ValueError("nugget must be either a scalar "
                         "or array of length n_samples.")
    # Check optimizer
    if self.optimizer not in self._optimizer_types:
        raise ValueError("optimizer should be one of %s"
                         % self._optimizer_types)
    # Force random_start type to int
    self.random_start = int(self.random_start)
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/ensemble/gradient_boosting.py | 2 | 60872 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """Predict a constant: the alpha-quantile of the training targets."""

    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        # ``X`` is ignored; only the (weighted) target quantile matters.
        percentile = self.alpha * 100.0
        if sample_weight is not None:
            self.quantile = _weighted_percentile(y, sample_weight, percentile)
        else:
            self.quantile = stats.scoreatpercentile(y, percentile)

    def predict(self, X):
        # One constant prediction per row of X.
        check_is_fitted(self, 'quantile')
        return np.full((X.shape[0], 1), self.quantile, dtype=np.float64)
class MeanEstimator(BaseEstimator):
    """Predict a constant: the (weighted) mean of the training targets."""

    def fit(self, X, y, sample_weight=None):
        # ``X`` is ignored; only the target mean matters.
        if sample_weight is not None:
            self.mean = np.average(y, weights=sample_weight)
        else:
            self.mean = np.mean(y)

    def predict(self, X):
        # One constant prediction per row of X.
        check_is_fitted(self, 'mean')
        return np.full((X.shape[0], 1), self.mean, dtype=np.float64)
class LogOddsEstimator(BaseEstimator):
    """Predict a constant: the (scaled) log odds of the positive class."""

    # Subclasses may override to rescale the prior (see exponential loss).
    scale = 1.0

    def fit(self, X, y, sample_weight=None):
        # pre-cond: pos, neg are encoded as 1, 0
        if sample_weight is not None:
            pos = np.sum(sample_weight * y)
            neg = np.sum(sample_weight * (1 - y))
        else:
            pos = np.sum(y)
            neg = y.shape[0] - pos
        if neg == 0 or pos == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(pos / neg)

    def predict(self, X):
        # One constant prediction per row of X.
        check_is_fitted(self, 'prior')
        return np.full((X.shape[0], 1), self.prior, dtype=np.float64)
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds ratio scaled by 0.5 -- for exponential loss. """
    # Halving the raw log odds makes the initial prediction the minimizer
    # of the exponential (AdaBoost) loss instead of the binomial deviance.
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """An estimator predicting the probability of each
    class in the training data.
    """
    def fit(self, X, y, sample_weight=None):
        """Compute the (weighted) empirical class priors; ``X`` is ignored."""
        if sample_weight is None:
            # BUGFIX: ``np.float`` is a deprecated alias of the builtin
            # ``float`` (removed in NumPy 1.24); use an explicit dtype.
            sample_weight = np.ones_like(y, dtype=np.float64)
        class_counts = np.bincount(y, weights=sample_weight)
        self.priors = class_counts / class_counts.sum()

    def predict(self, X):
        """Return the fitted prior vector for every row of ``X``."""
        check_is_fitted(self, 'priors')
        y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
        y[:] = self.priors
        return y
class ZeroEstimator(BaseEstimator):
    """An estimator that simply predicts zero. """

    def fit(self, X, y, sample_weight=None):
        """Infer the output width (n_classes) from ``y``; ``X`` is ignored."""
        # BUGFIX: test against ``np.integer`` (the numpy integer dtype
        # hierarchy) rather than the builtin ``int``, which is unreliable
        # and deprecated as an ``issubdtype`` argument.
        if np.issubdtype(y.dtype, np.integer):
            # classification
            self.n_classes = np.unique(y).shape[0]
            if self.n_classes == 2:
                # binary classification uses a single output column
                self.n_classes = 1
        else:
            # regression
            self.n_classes = 1

    def predict(self, X):
        check_is_fitted(self, 'n_classes')
        y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
        y.fill(0.0)
        return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """
    # Overridden by losses that need one tree per class and stage.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ---------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
            ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)
        # mask all which are not in sample mask: out-of-bag samples get
        # region -1 so they are excluded from the leaf line search below.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1
        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)
        # update predictions (both in-bag and out-of-bag) — note this uses
        # the unmasked regions, so every sample receives its leaf value.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """

    def __init__(self, n_classes):
        # Regression always fits exactly one tree per boosting stage.
        if n_classes == 1:
            super(RegressionLossFunction, self).__init__(n_classes)
        else:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions need not to be updated for least squares. """

    def init_estimator(self):
        # Initial model: constant (weighted) mean of the targets.
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean squared error of ``pred`` w.r.t. ``y``."""
        squared_error = (y - pred.ravel()) ** 2.0
        if sample_weight is not None:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * squared_error))
        return np.mean(squared_error)

    def negative_gradient(self, y, pred, **kargs):
        # For squared error the negative gradient is just the residual.
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.
        But it has to update the predictions.
        """
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Nothing to do: fitted leaf means are already optimal for LS.
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression. """

    def init_estimator(self):
        # Initial model: the (weighted) median of the targets.
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean absolute error of ``pred`` w.r.t. ``y``."""
        abs_error = np.abs(y - pred.ravel())
        if sample_weight is not None:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * abs_error))
        return abs_error.mean()

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        # sign of the residual, with ties (residual == 0) mapped to -1.
        return 2.0 * (y - pred.ravel() > 0.0) - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """LAD updates terminal regions to median estimates. """
        region = np.where(terminal_regions == leaf)[0]
        weights = sample_weight.take(region, axis=0)
        diff = y.take(region, axis=0) - pred.take(region, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, weights,
                                                      percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """
    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        # alpha: quantile of |residual| at which the loss switches from
        # quadratic to linear.
        self.alpha = alpha
        # gamma: the transition point; estimated each boosting iteration
        # in ``negative_gradient`` and reused by ``__call__`` and
        # ``_update_terminal_region``.
        self.gamma = None

    def init_estimator(self):
        # Initial model: the median (robust central estimate).
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        # Uses the cached self.gamma when available so the reported loss
        # matches the transition point of the current iteration.
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        # quadratic inside [-gamma, gamma], linear outside
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        pred = pred.ravel()
        diff = y - pred
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        # quadratic zone: gradient equals the residual itself
        residual[gamma_mask] = diff[gamma_mask]
        # linear zone: gradient clipped to +/- gamma
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        # cache gamma for __call__ / _update_terminal_region this iteration
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        # One-step approximation of the leaf's Huber M-estimate: start at
        # the weighted median, then shift by the mean clipped deviation.
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.
    """
    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # BUGFIX: validate with an explicit exception instead of
        # ``assert`` — asserts are stripped when Python runs with -O.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        # Initial model: the alpha-quantile of the targets.
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean pinball loss of ``pred`` w.r.t. ``y``."""
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        # BUGFIX: the under-prediction term must be subtracted — the
        # residuals in ``diff[~mask]`` are non-positive, so adding them
        # (as before) made the loss too small and possibly negative.
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss

    def negative_gradient(self, y, pred, **kargs):
        # Pinball-loss gradient: alpha above the prediction,
        # -(1 - alpha) at or below it.
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Leaf value: the weighted alpha-quantile of the leaf residuals.
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)
        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """

    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        If the loss does not support probabilities raises TypeError.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)

    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.

        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        # Initial model: constant log odds of the positive class.
        return LogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        if sample_weight is None:
            return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
        else:
            return (-2.0 / sample_weight.sum() *
                    np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))

    def negative_gradient(self, y, pred, **kargs):
        """Compute the residual (= negative gradient). """
        # expit(f) is the predicted P(y=1), so this is y - p.
        return y - expit(pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # prob == y - residual, hence prob * (1 - prob) expands as below.
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
        if denominator == 0.0:
            # degenerate leaf (empty or saturated probabilities)
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        # sigmoid of the raw score gives P(y=1)
        proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """
    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        # Initial model: the empirical class priors.
        return PriorProbabilityEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Multinomial deviance sum_i w_i * (-y_i.f_i + logsumexp(f_i))."""
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k
        if sample_weight is None:
            return np.sum(-1 * (Y * pred).sum(axis=1) +
                          logsumexp(pred, axis=1))
        else:
            # BUGFIX: the sample weight must multiply the whole
            # per-sample deviance; previously only the -y.f term was
            # weighted while logsumexp(pred) was summed unweighted.
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))

    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        # y - softmax_k(pred); nan_to_num guards exp over/underflow.
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # (K-1)/K scaling from Friedman et al.'s multi-class update
        numerator *= (self.K - 1) / self.K
        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # row-wise softmax; nan_to_num guards exp over/underflow
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.

    Same loss as AdaBoost.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)

    def init_estimator(self):
        # Initial model: half the log odds (minimizer of exp loss).
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean of exp(-(2y - 1) * pred)."""
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))

    def negative_gradient(self, y, pred, **kargs):
        # y_ maps {0, 1} labels to {+1, -1} (note the leading minus).
        y_ = -(2. * y - 1.)
        return y_ * np.exp(y_ * pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Closed-form leaf estimate for exponential loss (gbm-style).
        terminal_region = np.where(terminal_regions == leaf)[0]
        pred = pred.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        y_ = 2. * y - 1.
        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
        denominator = np.sum(sample_weight * np.exp(-y_ * pred))
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        # the 2x factor converts the half-scale score to log odds
        proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        # BUGFIX: ``np.int`` is a deprecated alias of the builtin ``int``
        # (removed in NumPy 1.24); casting to ``int`` is identical.
        return (score.ravel() >= 0.0).astype(int)
# Registry mapping the user-facing ``loss`` parameter strings to the loss
# classes defined above.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None,  # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }

# Registry mapping the user-facing ``init`` parameter strings to
# initial-estimator classes.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Reports verbose output to stdout.

    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
    each update.
    """

    def __init__(self, verbose):
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Print the header line and reset timing/frequency state."""
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        # report out-of-bag improvement only when subsampling is active
        if est.subsample < 1:
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')
        # print the header line
        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))
        self.verbose_fmt = ' '.join(verbose_fmt)
        # plot verbose info each time i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # iteration relative to the start iter (warm starts shift j)
        i = j - self.begin_at_stage
        if (i + 1) % self.verbose_mod != 0:
            return
        oob_impr = est.oob_improvement_[j] if do_oob else 0
        elapsed = time() - self.start_time
        remaining_time = ((est.n_estimators - (j + 1)) *
                          elapsed / float(i + 1))
        if remaining_time > 60:
            remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
        else:
            remaining_time = '{0:.2f}s'.format(remaining_time)
        print(self.verbose_fmt.format(iter=j + 1,
                                      train_score=est.train_score_[j],
                                      oob_impr=oob_impr,
                                      remaining_time=remaining_time))
        if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
            # adjust verbose frequency (powers of 10)
            self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
             min_samples_leaf, min_weight_fraction_leaf,
             max_depth, init, subsample, max_features,
             random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
             warm_start=False):
    """Store the boosting hyper-parameters (validated later by fit)."""
    self.n_estimators = n_estimators
    self.learning_rate = learning_rate
    self.loss = loss
    self.min_samples_split = min_samples_split
    self.min_samples_leaf = min_samples_leaf
    self.min_weight_fraction_leaf = min_weight_fraction_leaf
    self.subsample = subsample
    self.max_features = max_features
    self.max_depth = max_depth
    self.init = init
    self.random_state = random_state
    self.alpha = alpha
    self.verbose = verbose
    self.max_leaf_nodes = max_leaf_nodes
    self.warm_start = warm_start
    # BUGFIX: ``np.object`` is a deprecated alias of the builtin
    # ``object`` (removed in NumPy 1.24); use the builtin directly.
    self.estimators_ = np.empty((0, 0), dtype=object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
               criterion, splitter, random_state):
    """Fit another stage of ``n_classes_`` trees to the boosting model.

    Fits one regression tree per class (``loss.K`` trees) on the
    negative gradient of the loss, stores them in
    ``self.estimators_[i]``, and returns the updated ``y_pred``.
    """
    # BUGFIX: ``np.bool`` is a deprecated alias of the builtin ``bool``
    # (removed in NumPy 1.24); ``np.bool_`` is the actual dtype of a
    # boolean mask array.
    assert sample_mask.dtype == np.bool_
    loss = self.loss_
    original_y = y
    for k in range(loss.K):
        if loss.is_multi_class:
            # binarize the targets for the current class
            y = np.array(original_y == k, dtype=np.float64)
        residual = loss.negative_gradient(y, y_pred, k=k,
                                          sample_weight=sample_weight)
        # induce regression tree on residuals
        tree = DecisionTreeRegressor(
            criterion=criterion,
            splitter=splitter,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            random_state=random_state)
        if self.subsample < 1.0:
            # zero out out-of-bag weights; no inplace multiplication!
            sample_weight = sample_weight * sample_mask.astype(np.float64)
        tree.fit(X, residual, sample_weight=sample_weight,
                 check_input=False)
        # update tree leaves (line search) and the running predictions
        loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                     sample_weight, sample_mask,
                                     self.learning_rate, k=k)
        # add tree to ensemble
        self.estimators_[i, k] = tree
    return y_pred
def _check_params(self):
    """Check validity of parameters and raise ValueError if not valid. """
    if self.n_estimators <= 0:
        raise ValueError("n_estimators must be greater than 0 but "
                         "was %r" % self.n_estimators)
    if self.learning_rate <= 0.0:
        raise ValueError("learning_rate must be greater than 0 but "
                         "was %r" % self.learning_rate)
    if (self.loss not in self._SUPPORTED_LOSS
            or self.loss not in LOSS_FUNCTIONS):
        raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
    # 'deviance' resolves to binomial or multinomial depending on the
    # number of classes observed during fit.
    if self.loss == 'deviance':
        loss_class = (MultinomialDeviance
                      if len(self.classes_) > 2
                      else BinomialDeviance)
    else:
        loss_class = LOSS_FUNCTIONS[self.loss]
    # huber/quantile losses additionally take the alpha parameter
    if self.loss in ('huber', 'quantile'):
        self.loss_ = loss_class(self.n_classes_, self.alpha)
    else:
        self.loss_ = loss_class(self.n_classes_)
    if not (0.0 < self.subsample <= 1.0):
        raise ValueError("subsample must be in (0,1] but "
                         "was %r" % self.subsample)
    # ``init`` may be a registered name or a fitted-estimator-like object
    if self.init is not None:
        if isinstance(self.init, six.string_types):
            if self.init not in INIT_ESTIMATORS:
                raise ValueError('init="%s" is not supported' % self.init)
        else:
            if (not hasattr(self.init, 'fit')
                    or not hasattr(self.init, 'predict')):
                raise ValueError("init=%r must be valid BaseEstimator "
                                 "and support both fit and "
                                 "predict" % self.init)
    if not (0.0 < self.alpha < 1.0):
        raise ValueError("alpha must be in (0.0, 1.0) but "
                         "was %r" % self.alpha)
    # Resolve max_features (string/None/int/float) to the integer
    # attribute ``max_features_`` used when building trees.
    if isinstance(self.max_features, six.string_types):
        if self.max_features == "auto":
            # if is_classification
            if self.n_classes_ > 1:
                max_features = max(1, int(np.sqrt(self.n_features)))
            else:
                # is regression
                max_features = self.n_features
        elif self.max_features == "sqrt":
            max_features = max(1, int(np.sqrt(self.n_features)))
        elif self.max_features == "log2":
            max_features = max(1, int(np.log2(self.n_features)))
        else:
            raise ValueError("Invalid value for max_features: %r. "
                             "Allowed string values are 'auto', 'sqrt' "
                             "or 'log2'." % self.max_features)
    elif self.max_features is None:
        max_features = self.n_features
    elif isinstance(self.max_features, (numbers.Integral, np.integer)):
        max_features = self.max_features
    else:  # float: interpreted as a fraction of n_features
        max_features = int(self.max_features * self.n_features)
    self.max_features_ = max_features
def _init_state(self):
    """Initialize model state and allocate model state data structures. """
    # Resolve the initial estimator: default from the loss function, a
    # registered name, or a user-supplied estimator object.
    if self.init is None:
        self.init_ = self.loss_.init_estimator()
    elif isinstance(self.init, six.string_types):
        self.init_ = INIT_ESTIMATORS[self.init]()
    else:
        self.init_ = self.init
    # One tree per stage and per class.  BUGFIX: ``np.object`` is a
    # deprecated alias of the builtin ``object`` (removed in NumPy 1.24).
    self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                dtype=object)
    self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
    # do oob?
    if self.subsample < 1.0:
        self.oob_improvement_ = np.zeros((self.n_estimators),
                                         dtype=np.float64)
def _clear_state(self):
    """Clear the state of the gradient boosting model. """
    if hasattr(self, 'estimators_'):
        # BUGFIX: ``np.object`` is a deprecated alias of the builtin
        # ``object`` (removed in NumPy 1.24); use the builtin directly.
        self.estimators_ = np.empty((0, 0), dtype=object)
    if hasattr(self, 'train_score_'):
        del self.train_score_
    if hasattr(self, 'oob_improvement_'):
        del self.oob_improvement_
    if hasattr(self, 'init_'):
        del self.init_
def _resize_state(self):
    """Add additional ``n_estimators`` entries to all attributes. """
    # self.n_estimators is the number of additional est to fit
    total_n_estimators = self.n_estimators
    if total_n_estimators < self.estimators_.shape[0]:
        # BUGFIX: the message formatted ``self.estimators_[0]`` (the
        # first ndarray row) with %d; the intended value is the current
        # number of stages, ``self.estimators_.shape[0]``.
        raise ValueError('resize with smaller n_estimators %d < %d' %
                         (total_n_estimators, self.estimators_.shape[0]))
    self.estimators_.resize((total_n_estimators, self.loss_.K))
    self.train_score_.resize(total_n_estimators)
    if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
        # if do oob resize arrays or create new if not available
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_.resize(total_n_estimators)
        else:
            self.oob_improvement_ = np.zeros((total_n_estimators,),
                                             dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
    def fit(self, X, y, sample_weight=None, monitor=None):
        """Fit the gradient boosting model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspect, and
            snapshoting.
        Returns
        -------
        self : object
            Returns self.
        """
        # if not warmstart - clear the estimator state
        if not self.warm_start:
            self._clear_state()
        # Check input
        X, y = check_X_y(X, y, dtype=DTYPE)
        n_samples, self.n_features = X.shape
        if sample_weight is None:
            # Uniform weighting when no sample weights are supplied.
            sample_weight = np.ones(n_samples, dtype=np.float32)
        else:
            sample_weight = column_or_1d(sample_weight, warn=True)
        check_consistent_length(X, y, sample_weight)
        # Subclass hook: e.g. the classifier encodes labels here.
        y = self._validate_y(y)
        random_state = check_random_state(self.random_state)
        self._check_params()
        if not self._is_initialized():
            # init state
            self._init_state()
            # fit initial model - FIXME make sample_weight optional
            self.init_.fit(X, y, sample_weight)
            # init predictions
            y_pred = self.init_.predict(X)
            begin_at_stage = 0
        else:
            # add more estimators to fitted model
            # invariant: warm_start = True
            if self.n_estimators < self.estimators_.shape[0]:
                raise ValueError('n_estimators=%d must be larger or equal to '
                                 'estimators_.shape[0]=%d when '
                                 'warm_start==True'
                                 % (self.n_estimators,
                                    self.estimators_.shape[0]))
            begin_at_stage = self.estimators_.shape[0]
            # Recompute the current ensemble predictions so boosting can
            # resume from where the previous fit stopped.
            y_pred = self._decision_function(X)
            self._resize_state()
        # fit the boosting stages
        n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                    begin_at_stage, monitor)
        # change shape of arrays after fit (early-stopping or additional ests)
        if n_stages != self.estimators_.shape[0]:
            self.estimators_ = self.estimators_[:n_stages]
            self.train_score_ = self.train_score_[:n_stages]
            if hasattr(self, 'oob_improvement_'):
                self.oob_improvement_ = self.oob_improvement_[:n_stages]
        return self
    def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                    begin_at_stage=0, monitor=None):
        """Iteratively fits the stages.
        For each stage it computes the progress (OOB, train score)
        and delegates to ``_fit_stage``.
        Returns the number of stages fit; might differ from ``n_estimators``
        due to early stopping.
        """
        n_samples = X.shape[0]
        do_oob = self.subsample < 1.0
        # All samples are in-bag when there is no subsampling.
        # NOTE(review): ``np.bool`` is a deprecated alias of the builtin
        # ``bool`` on newer numpy versions.
        sample_mask = np.ones((n_samples, ), dtype=np.bool)
        n_inbag = max(1, int(self.subsample * n_samples))
        loss_ = self.loss_
        # init criterion and splitter
        # These are created once and reused across all stages/trees.
        criterion = FriedmanMSE(1)
        splitter = PresortBestSplitter(criterion,
                                       self.max_features_,
                                       self.min_samples_leaf,
                                       self.min_weight_fraction_leaf,
                                       random_state)
        if self.verbose:
            verbose_reporter = VerboseReporter(self.verbose)
            verbose_reporter.init(self, begin_at_stage)
        # perform boosting iterations
        # ``i`` is pre-assigned so ``i + 1`` is well-defined even if the
        # range below is empty (n_estimators == begin_at_stage).
        i = begin_at_stage
        for i in range(begin_at_stage, self.n_estimators):
            # subsampling
            if do_oob:
                sample_mask = _random_sample_mask(n_samples, n_inbag,
                                                  random_state)
                # OOB score before adding this stage
                old_oob_score = loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask])
            # fit next stage of trees
            y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                     sample_mask, criterion, splitter,
                                     random_state)
            # track deviance (= loss)
            if do_oob:
                self.train_score_[i] = loss_(y[sample_mask],
                                             y_pred[sample_mask],
                                             sample_weight[sample_mask])
                # oob_improvement_ = OOB loss before minus after this stage.
                self.oob_improvement_[i] = (old_oob_score -
                                            loss_(y[~sample_mask], y_pred[~sample_mask],
                                                  sample_weight[~sample_mask]))
            else:
                # no need to fancy index w/ no subsampling
                self.train_score_[i] = loss_(y, y_pred, sample_weight)
            if self.verbose > 0:
                verbose_reporter.update(i, self)
            if monitor is not None:
                # A truthy return value from the monitor stops boosting early.
                early_stopping = monitor(i, self, locals())
                if early_stopping:
                    break
        return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
    def _decision_function(self, X):
        # Internal variant of ``decision_function`` for use in the inner
        # loop: does not ravel the output in the single-class case and
        # performs no input validation.
        score = self._init_decision_function(X)
        # predict_stages accumulates every stage's tree predictions, scaled
        # by the learning rate, onto ``score``.
        predict_stages(self.estimators_, X, self.learning_rate, score)
        return score
    def decision_function(self, X):
        """Compute the decision function of ``X``.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        # Single output column (regression / binary classification):
        # flatten to a 1-d array.
        if score.shape[1] == 1:
            return score.ravel()
        return score
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        # Start from the init estimator's predictions, then add one stage
        # per iteration.  NOTE(review): ``score`` appears to be updated in
        # place by predict_stage, so the same array object is yielded each
        # time — copy it if you need to keep intermediate values.
        score = self._init_decision_function(X)
        for i in range(self.estimators_.shape[0]):
            predict_stage(self.estimators_, i, X, self.learning_rate, score)
            yield score
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        total_sum = np.zeros((self.n_features, ), dtype=np.float64)
        for stage in self.estimators_:
            # Average the importances of the trees within one stage
            # (one tree per class)...
            stage_sum = sum(tree.feature_importances_
                            for tree in stage) / len(stage)
            total_sum += stage_sum
        # ...then average those per-stage importances across all stages.
        importances = total_sum / len(self.estimators_)
        return importances
def _validate_y(self, y):
self.n_classes_ = 1
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.
    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.
    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=sqrt(n_features)`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.
        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    `init` : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier
    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    J. Friedman, Stochastic Gradient Boosting, 1999
    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """
    # Loss names accepted by this subclass; validated in _check_params.
    _SUPPORTED_LOSS = ('deviance', 'exponential')
    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):
        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
    def _validate_y(self, y):
        # Encode class labels as contiguous integers 0..n_classes_-1 and
        # remember the original labels in ``classes_``.
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y
    def predict(self, X):
        """Predict class for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        # Map raw scores -> encoded class indices -> original class labels.
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)
    def staged_predict(self, X):
        """Predict class at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self.staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)
    def predict_proba(self, X):
        """Predict class probabilities for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.
        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            # Not-fitted errors must propagate unchanged; only a genuinely
            # missing _score_to_proba means the loss lacks probabilities.
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self.staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.
    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.
    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    `init` : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor
    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    J. Friedman, Stochastic Gradient Boosting, 1999
    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """
    # Loss names accepted by this subclass; validated in _check_params.
    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        # Regression has a single output column; flatten it to 1-d.
        return self.decision_function(X).ravel()
    def staged_predict(self, X):
        """Predict regression target at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self.staged_decision_function(X):
            yield y.ravel()
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <jdhunter@ace.bsd.uchicago.edu>
Paul Barrett <Barrett@STScI.Edu>
Michael Droettboom <mdroe@STScI.edu>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
# Set to True to look fonts up through fontconfig (experimental).
USE_FONTCONFIG = False
verbose = matplotlib.verbose
# CSS-style relative font sizes, expressed as multipliers of the default
# font size; ``None`` maps to 1.0 (no scaling).
font_scalings = {
    'xx-small' : 0.579,
    'x-small'  : 0.694,
    'small'    : 0.833,
    'medium'   : 1.0,
    'large'    : 1.200,
    'x-large'  : 1.440,
    'xx-large' : 1.728,
    'larger'   : 1.2,
    'smaller'  : 0.833,
    None       : 1.0}
# CSS font-stretch keywords mapped to numeric stretch values
# (100 = most condensed, 900 = most expanded, 500 = normal).
stretch_dict = {
    'ultra-condensed' : 100,
    'extra-condensed' : 200,
    'condensed'       : 300,
    'semi-condensed'  : 400,
    'normal'          : 500,
    'semi-expanded'   : 600,
    'expanded'        : 700,
    'extra-expanded'  : 800,
    'ultra-expanded'  : 900}
# CSS font-weight keywords mapped to numeric weights (100-900);
# several synonyms map to the same numeric value.
weight_dict = {
    'ultralight' : 100,
    'light'      : 200,
    'normal'     : 400,
    'regular'    : 400,
    'book'       : 400,
    'medium'     : 500,
    'roman'      : 500,
    'semibold'   : 600,
    'demibold'   : 600,
    'demi'       : 600,
    'bold'       : 700,
    'heavy'      : 800,
    'extra bold' : 800,
    'black'      : 900}
# Generic family names that are resolved to concrete fonts via rcParams
# rather than naming a specific font.
font_family_aliases = set([
    'serif',
    'sans-serif',
    'cursive',
    'fantasy',
    'monospace',
    'sans'])
# OS Font paths
# Registry key under HKEY_CURRENT_USER holding the per-user shell folders,
# including the user's font directory.
MSFolders = \
    r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories   = [
    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
    r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories  = [
    # an old standard installation point
    "/usr/X11R6/lib/X11/fonts/TTF/",
    # here is the new standard location for fonts
    "/usr/share/fonts/",
    # documented as a good place to install new fonts
    "/usr/local/share/fonts/",
    # common application, not really useful
    "/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
    "/Library/Fonts/",
    "/Network/Library/Fonts/",
    "/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
    home = os.environ.get('HOME')
    if home is not None:
        # user fonts on OSX
        path = os.path.join(home, 'Library', 'Fonts')
        OSXFontDirectories.append(path)
        # per-user fonts on X11 systems
        path = os.path.join(home, '.fonts')
        X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
    """
    Return the tuple of file extensions that are synonyms for the
    given file extension *fontext* ('ttf', 'otf' or 'afm').
    """
    # TrueType and OpenType files are interchangeable for lookup purposes;
    # AFM files stand alone.
    synonyms = {
        'ttf': ('ttf', 'otf'),
        'otf': ('ttf', 'otf'),
        'afm': ('afm',),
    }
    return synonyms[fontext]
def win32FontDirectory():
    """
    Return the user-specified font directory for Win32.  This is
    looked up from the registry key::
      \\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
    If the key is not found, $WINDIR/Fonts will be returned.
    """
    # NOTE(review): uses the Python 2 ``_winreg`` module (renamed ``winreg``
    # in Python 3); every failure falls through to the $WINDIR default.
    try:
        import _winreg
    except ImportError:
        pass # Fall through to default
    else:
        try:
            user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
            try:
                try:
                    # Value is (data, type); only the path data is needed.
                    return _winreg.QueryValueEx(user, 'Fonts')[0]
                except OSError:
                    pass # Fall through to default
            finally:
                _winreg.CloseKey(user)
        except OSError:
            pass # Fall through to default
    return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
    """
    Search for fonts in the specified font directory, or use the
    system directories if none given.  A list of TrueType font
    filenames are returned by default, or AFM fonts if *fontext* ==
    'afm'.
    """
    import _winreg
    if directory is None:
        directory = win32FontDirectory()
    # From here on ``fontext`` is the tuple of synonymous extensions.
    fontext = get_fontext_synonyms(fontext)
    key, items = None, {}
    for fontdir in MSFontDirectories:
        try:
            local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
        except OSError:
            continue
        if not local:
            # Registry key unavailable: fall back to globbing the
            # font directory directly.
            files = []
            for ext in fontext:
                files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
            return files
        try:
            for j in range(_winreg.QueryInfoKey(local)[1]):
                try:
                    # NOTE(review): ``any`` shadows the builtin here;
                    # it holds the registry value type, which is unused.
                    key, direc, any = _winreg.EnumValue( local, j)
                    if not os.path.dirname(direc):
                        direc = os.path.join(directory, direc)
                    direc = os.path.abspath(direc).lower()
                    if os.path.splitext(direc)[1][1:] in fontext:
                        items[direc] = 1
                except EnvironmentError:
                    continue
                except WindowsError:
                    continue
            # NOTE(review): returning inside the loop means only the first
            # readable registry directory is consulted — confirm intended.
            return items.keys()
        finally:
            _winreg.CloseKey(local)
    # Reached only when no registry key could be opened at all.
    return None
def OSXFontDirectory():
    """
    Return the system font directories for OS X.  This is done by
    starting at the list of hardcoded paths in
    :attr:`OSXFontDirectories` and returning all nested directories
    within them.
    """
    fontpaths = []
    # Callback for os.path.walk: record every visited directory.
    # NOTE(review): ``os.path.walk`` is Python-2-only (removed in Python 3).
    def add(arg,directory,files):
        fontpaths.append(directory)
    for fontdir in OSXFontDirectories:
        try:
            if os.path.isdir(fontdir):
                os.path.walk(fontdir, add, None)
        except (IOError, OSError, TypeError, ValueError):
            # Unreadable or malformed entries are silently skipped.
            pass
    return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
    """
    Get list of font files on OS X - ignores font suffix by default.

    *directory* may be a list of directories to search; if None the
    standard OS X font directories are scanned.
    """
    if directory is None:
        directory = OSXFontDirectory()
    fontext = get_fontext_synonyms(fontext)
    files = []
    for path in directory:
        # NOTE(review): get_fontext_synonyms never returns None for the
        # known extensions, so this branch looks unreachable — confirm.
        if fontext is None:
            files.extend(glob.glob(os.path.join(path,'*')))
        else:
            for ext in fontext:
                # Match both lower- and upper-case extensions.
                files.extend(glob.glob(os.path.join(path, '*.'+ext)))
                files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
    return files
def x11FontDirectory():
    """
    Return the system font directories for X11.  This is done by
    starting at the list of hardcoded paths in
    :attr:`X11FontDirectories` and returning all nested directories
    within them.
    """
    fontpaths = []
    # Callback for os.path.walk: record every visited directory.
    # NOTE(review): ``os.path.walk`` is Python-2-only (removed in Python 3).
    def add(arg,directory,files):
        fontpaths.append(directory)
    for fontdir in X11FontDirectories:
        try:
            if os.path.isdir(fontdir):
                os.path.walk(fontdir, add, None)
        except (IOError, OSError, TypeError, ValueError):
            # Unreadable or malformed entries are silently skipped.
            pass
    return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
    """
    Grab a list of all the fonts that are being tracked by fontconfig
    by making a system call to ``fc-list``.  This is an easy way to
    grab all of the fonts the user wants to be made available to
    applications, without needing to know where all of them reside.
    """
    # ``commands`` exists only on Python 2 / Unix; without it there is
    # nothing we can do, so report no fontconfig fonts.
    try:
        import commands
    except ImportError:
        return {}
    wanted_exts = get_fontext_synonyms(fontext)
    fontfiles = {}
    status, output = commands.getstatusoutput("fc-list file")
    if status != 0:
        # fc-list failed or is unavailable.
        return fontfiles
    for line in output.split('\n'):
        fname = line.split(':')[0]
        ext = os.path.splitext(fname)[1][1:]
        if ext in wanted_exts and os.path.exists(fname):
            fontfiles[fname] = 1
    return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
    """
    Search for fonts in the specified font paths.  If no paths are
    given, will use a standard set of system paths, as well as the
    list of fonts tracked by fontconfig if fontconfig is installed and
    available.  A list of TrueType fonts are returned by default with
    AFM fonts as an option.
    """
    # Dict used as an ordered-ish set of absolute font file paths.
    fontfiles = {}
    fontexts = get_fontext_synonyms(fontext)
    if fontpaths is None:
        if sys.platform == 'win32':
            fontdir = win32FontDirectory()
            fontpaths = [fontdir]
            # now get all installed fonts directly...
            for f in win32InstalledFonts(fontdir):
                base, ext = os.path.splitext(f)
                if len(ext)>1 and ext[1:].lower() in fontexts:
                    fontfiles[f] = 1
        else:
            fontpaths = x11FontDirectory()
            # check for OS X & load its fonts if present
            if sys.platform == 'darwin':
                for f in OSXInstalledFonts(fontext=fontext):
                    fontfiles[f] = 1
            # Merge in whatever fontconfig tracks (empty dict if absent).
            for f in get_fontconfig_fonts(fontext):
                fontfiles[f] = 1
    # NOTE(review): ``unicode`` is Python-2-only; this isinstance check
    # fails with NameError on Python 3.
    elif isinstance(fontpaths, (str, unicode)):
        fontpaths = [fontpaths]
    for path in fontpaths:
        files = []
        for ext in fontexts:
            # Match both lower- and upper-case extensions.
            files.extend(glob.glob(os.path.join(path, '*.'+ext)))
            files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
        for fname in files:
            fontfiles[os.path.abspath(fname)] = 1
    # Drop any registry/config entries pointing at files that no longer exist.
    return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
    """
    Return the weight property as a numeric value.  String values
    are converted to their corresponding numeric value; unrecognized
    names fall back to 400 (normal).

    Raises
    ------
    ValueError
        If *weight* is neither a string nor one of the integers
        100, 200, ..., 900.
    """
    if isinstance(weight, str):
        try:
            weight = weight_dict[weight.lower()]
        except KeyError:
            # Unknown weight name: treat as normal.
            weight = 400
    elif weight in range(100, 1000, 100):
        pass
    else:
        # BUGFIX: the original used the Python-2-only statement form
        # ``raise ValueError, '...'``; the call form below behaves
        # identically and is valid on both Python 2 and 3.
        raise ValueError('weight not a valid integer')
    return weight
class FontEntry(object):
    """
    A class for storing Font properties.  It is used when populating
    the font lookup dictionary.
    """
    def __init__(self,
                 fname  ='',
                 name   ='',
                 style  ='normal',
                 variant='normal',
                 weight ='normal',
                 stretch='normal',
                 size   ='medium',
                 ):
        # Plain attribute assignment; all properties are stored as given,
        # except that the size is normalized below.
        for attr, value in (('fname', fname),
                            ('name', name),
                            ('style', style),
                            ('variant', variant),
                            ('weight', weight),
                            ('stretch', stretch)):
            setattr(self, attr, value)
        # Numeric sizes are canonicalized to a float string (e.g. '12'
        # becomes '12.0'); symbolic sizes such as 'medium' pass through.
        try:
            self.size = str(float(size))
        except ValueError:
            self.size = size
def ttfFontProperty(font):
    """
    Build a :class:`FontEntry` by extracting information from a
    TrueType font file.

    *font* is a :class:`FT2Font` instance.
    """
    name = font.family_name

    #  Styles are: italic, oblique, and normal (default)
    sfnt = font.get_sfnt()
    # SFNT name-table records are keyed by (platform, encoding,
    # language, name-id); name id 2 is the subfamily (style) name and
    # name id 4 the full font name.
    sfnt2 = sfnt.get((1,0,0,2))
    sfnt4 = sfnt.get((1,0,0,4))
    if sfnt2:
        sfnt2 = sfnt2.lower()
    else:
        sfnt2 = ''
    if sfnt4:
        sfnt4 = sfnt4.lower()
    else:
        sfnt4 = ''
    if sfnt4.find('oblique') >= 0:
        style = 'oblique'
    elif sfnt4.find('italic') >= 0:
        style = 'italic'
    elif sfnt2.find('regular') >= 0:
        style = 'normal'
    elif font.style_flags & ft2font.ITALIC:
        style = 'italic'
    else:
        style = 'normal'

    #  Variants are: small-caps and normal (default)
    # !!!!  Untested
    if name.lower() in ['capitals', 'small-caps']:
        variant = 'small-caps'
    else:
        variant = 'normal'

    #  Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
    #    600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
    #    lighter and bolder are also allowed.
    weight = None
    for w in weight_dict.keys():
        if sfnt4.find(w) >= 0:
            weight = w
            break
    if not weight:
        if font.style_flags & ft2font.BOLD:
            weight = 700
        else:
            weight = 400
    weight = weight_as_number(weight)

    #  Stretch can be absolute and relative
    #  Absolute stretches are: ultra-condensed, extra-condensed, condensed,
    #    semi-condensed, normal, semi-expanded, expanded, extra-expanded,
    #    and ultra-expanded.
    #  Relative stretches are: wider, narrower
    #  Child value is: inherit
    # !!!!  Incomplete
    # Bug fix: test 'demi cond' *before* the generic 'cond' test.
    # 'demi cond' contains 'cond', so the semi-condensed branch was
    # previously unreachable (everything fell into 'condensed').
    if sfnt4.find('demi cond') >= 0:
        stretch = 'semi-condensed'
    elif sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
           sfnt4.find('cond') >= 0:
        stretch = 'condensed'
    elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
        stretch = 'expanded'
    else:
        stretch = 'normal'

    #  Sizes can be absolute and relative.
    #  Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
    #    and xx-large.
    #  Relative sizes are: larger, smaller
    #  Length value is an absolute font size, e.g. 12pt
    #  Percentage values are in 'em's.  Most robust specification.
    # !!!!  Incomplete
    if font.scalable:
        size = 'scalable'
    else:
        size = str(float(font.get_fontsize()))

    # !!!!  Incomplete: size_adjust is not yet supported.
    return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
    """
    Build a :class:`FontEntry` by extracting information from an AFM
    font file.

    *font* is a :class:`AFM` instance; *fontpath* is the path it was
    loaded from.
    """
    name = font.get_familyname()
    lowered = name.lower()

    # Style: italic, oblique or normal (default).  A non-zero slant
    # angle or an 'italic'/'oblique' family name decides it.
    if font.get_angle() != 0 or 'italic' in lowered:
        style = 'italic'
    elif 'oblique' in lowered:
        style = 'oblique'
    else:
        style = 'normal'

    # Variant: small-caps or normal (default).  !!!! Untested heuristic.
    if lowered in ('capitals', 'small-caps'):
        variant = 'small-caps'
    else:
        variant = 'normal'

    # Weight: CSS numeric value 100-900 derived from the AFM header
    # (600 semibold/demibold, 700 bold, 800 heavy, 900 black, ...).
    weight = weight_as_number(font.get_weight().lower())

    # The AFM format provides no stretch information; !!!! Incomplete.
    stretch = 'normal'

    # All AFM fonts are apparently scalable.
    size = 'scalable'

    return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
    """
    Create a font lookup list from a sequence of font file paths.

    The default is to parse TrueType fonts; pass ``fontext='afm'`` to
    build a list of AFM fonts instead.  Files that cannot be opened or
    parsed are reported through *verbose* and skipped; only the first
    file with a given basename is used.
    """
    fontlist = []
    seen = {}
    for fpath in fontfiles:
        verbose.report('createFontDict: %s' % (fpath), 'debug')
        fname = os.path.split(fpath)[1]
        if fname in seen:
            continue
        else:
            seen[fname] = 1
        if fontext == 'afm':
            # 'except Exception' (not a bare except) so that
            # KeyboardInterrupt/SystemExit are not swallowed during a
            # font scan; any ordinary failure just skips the file.
            try:
                fh = open(fpath, 'r')
            except Exception:
                verbose.report("Could not open font file %s" % fpath)
                continue
            try:
                try:
                    font = afm.AFM(fh)
                finally:
                    fh.close()
            except RuntimeError:
                verbose.report("Could not parse font file %s"%fpath)
                continue
            prop = afmFontProperty(fpath, font)
        else:
            try:
                font = ft2font.FT2Font(str(fpath))
            except RuntimeError:
                verbose.report("Could not open font file %s"%fpath)
                continue
            except UnicodeError:
                verbose.report("Cannot handle unicode filenames")
                continue
            try:
                prop = ttfFontProperty(font)
            except Exception:
                # Skip fonts whose tables we cannot interpret.
                continue
        fontlist.append(prop)
    return fontlist
class FontProperties(object):
    """
    A class for storing and manipulating font properties.

    The font properties are those described in the `W3C Cascading
    Style Sheet, Level 1
    <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
    specification.  The six properties are:

      - family: A list of font names in decreasing order of priority.
        The items may include a generic font family name, either
        'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
        In that case, the actual font to be used will be looked up
        from the associated rcParam in :file:`matplotlibrc`.

      - style: Either 'normal', 'italic' or 'oblique'.

      - variant: Either 'normal' or 'small-caps'.

      - stretch: A numeric value in the range 0-1000 or one of
        'ultra-condensed', 'extra-condensed', 'condensed',
        'semi-condensed', 'normal', 'semi-expanded', 'expanded',
        'extra-expanded' or 'ultra-expanded'

      - weight: A numeric value in the range 0-1000 or one of
        'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
        'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
        'extra bold', 'black'

      - size: Either an relative value of 'xx-small', 'x-small',
        'small', 'medium', 'large', 'x-large', 'xx-large' or an
        absolute font size, e.g. 12

    The default font property for TrueType fonts (as specified in the
    default :file:`matplotlibrc` file) is::

      sans-serif, normal, normal, normal, normal, scalable.

    Alternatively, a font may be specified using an absolute path to a
    .ttf file, by using the *fname* kwarg.

    The preferred usage of font sizes is to use the relative values,
    e.g. 'large', instead of absolute font sizes, e.g. 12.  This
    approach allows all text sizes to be made larger or smaller based
    on the font manager's default font size, i.e. by using the
    :meth:`FontManager.set_default_size` method.

    This class will also accept a `fontconfig
    <http://www.fontconfig.org/>`_ pattern, if it is the only argument
    provided.  See the documentation on `fontconfig patterns
    <http://www.fontconfig.org/fontconfig-user.html>`_.  This support
    does not require fontconfig to be installed.  We are merely
    borrowing its pattern syntax for use here.

    Note that matplotlib's internal font manager and fontconfig use a
    different algorithm to lookup fonts, so the results of the same pattern
    may be different in matplotlib than in other applications that use
    fontconfig.
    """

    def __init__(self,
                 family = None,
                 style  = None,
                 variant= None,
                 weight = None,
                 stretch= None,
                 size   = None,
                 fname  = None, # if this is set, it's a hardcoded filename to use
                 _init  = None  # used only by copy()
                 ):
        self._family = None
        self._slant = None
        self._variant = None
        self._weight = None
        self._stretch = None
        self._size = None
        self._file = None

        # This is used only by copy()
        if _init is not None:
            self.__dict__.update(_init.__dict__)
            return

        if is_string_like(family):
            # Treat family as a fontconfig pattern if it is the only
            # parameter provided.
            if (style is None and
                variant is None and
                weight is None and
                stretch is None and
                size is None and
                fname is None):
                self.set_fontconfig_pattern(family)
                return

        self.set_family(family)
        self.set_style(style)
        self.set_variant(variant)
        self.set_weight(weight)
        self.set_stretch(stretch)
        self.set_file(fname)
        self.set_size(size)

    def _parse_fontconfig_pattern(self, pattern):
        return parse_fontconfig_pattern(pattern)

    def __hash__(self):
        # sorted() works on both Python 2 and 3; dict.items() returns a
        # view object without a .sort() method on Python 3, so the old
        # "l = items(); l.sort()" idiom would raise AttributeError.
        l = sorted(self.__dict__.items())
        return hash(repr(l))

    def __str__(self):
        return self.get_fontconfig_pattern()

    def get_family(self):
        """
        Return a list of font names that comprise the font family.
        """
        if self._family is None:
            family = rcParams['font.family']
            if is_string_like(family):
                return [family]
            return family
        return self._family

    def get_name(self):
        """
        Return the name of the font that best matches the font
        properties.
        """
        return ft2font.FT2Font(str(findfont(self))).family_name

    def get_style(self):
        """
        Return the font style.  Values are: 'normal', 'italic' or
        'oblique'.
        """
        if self._slant is None:
            return rcParams['font.style']
        return self._slant
    get_slant = get_style

    def get_variant(self):
        """
        Return the font variant.  Values are: 'normal' or
        'small-caps'.
        """
        if self._variant is None:
            return rcParams['font.variant']
        return self._variant

    def get_weight(self):
        """
        Return the font weight.  Options are: A numeric value in the
        range 0-1000 or one of 'light', 'normal', 'regular', 'book',
        'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
        'heavy', 'extra bold', 'black'
        """
        if self._weight is None:
            return rcParams['font.weight']
        return self._weight

    def get_stretch(self):
        """
        Return the font stretch or width.  Options are: 'ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
        """
        if self._stretch is None:
            return rcParams['font.stretch']
        return self._stretch

    def get_size(self):
        """
        Return the font size.
        """
        if self._size is None:
            return rcParams['font.size']
        return self._size

    def get_size_in_points(self):
        # Absolute sizes pass through as floats; relative sizes
        # ('large', ...) are scaled off the font manager's default size.
        if self._size is not None:
            try:
                return float(self._size)
            except ValueError:
                pass
        default_size = fontManager.get_default_size()
        return default_size * font_scalings.get(self._size)

    def get_file(self):
        """
        Return the filename of the associated font.
        """
        return self._file

    def get_fontconfig_pattern(self):
        """
        Get a fontconfig pattern suitable for looking up the font as
        specified with fontconfig's ``fc-match`` utility.

        See the documentation on `fontconfig patterns
        <http://www.fontconfig.org/fontconfig-user.html>`_.

        This support does not require fontconfig to be installed or
        support for it to be enabled.  We are merely borrowing its
        pattern syntax for use here.
        """
        return generate_fontconfig_pattern(self)

    def set_family(self, family):
        """
        Change the font family.  May be either an alias (generic name
        is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
        'fantasy', or 'monospace', or a real font name.
        """
        if family is None:
            self._family = None
        else:
            if is_string_like(family):
                family = [family]
            self._family = family
    set_name = set_family

    def set_style(self, style):
        """
        Set the font style.  Values are: 'normal', 'italic' or
        'oblique'.
        """
        if style not in ('normal', 'italic', 'oblique', None):
            raise ValueError("style must be normal, italic or oblique")
        self._slant = style
    set_slant = set_style

    def set_variant(self, variant):
        """
        Set the font variant.  Values are: 'normal' or 'small-caps'.
        """
        if variant not in ('normal', 'small-caps', None):
            raise ValueError("variant must be normal or small-caps")
        self._variant = variant

    def set_weight(self, weight):
        """
        Set the font weight.  May be either a numeric value in the
        range 0-1000 or one of 'ultralight', 'light', 'normal',
        'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
        'demi', 'bold', 'heavy', 'extra bold', 'black'
        """
        if weight is not None:
            try:
                weight = int(weight)
                if weight < 0 or weight > 1000:
                    raise ValueError()
            except ValueError:
                # Non-numeric weights must be a known symbolic name.
                if weight not in weight_dict:
                    raise ValueError("weight is invalid")
        self._weight = weight

    def set_stretch(self, stretch):
        """
        Set the font stretch or width.  Options are: 'ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded' or
        'ultra-expanded', or a numeric value in the range 0-1000.
        """
        if stretch is not None:
            try:
                stretch = int(stretch)
                if stretch < 0 or stretch > 1000:
                    raise ValueError()
            except ValueError:
                if stretch not in stretch_dict:
                    raise ValueError("stretch is invalid")
        self._stretch = stretch

    def set_size(self, size):
        """
        Set the font size.  Either an relative value of 'xx-small',
        'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
        or an absolute font size, e.g. 12.
        """
        if size is not None:
            try:
                size = float(size)
            except ValueError:
                # size is known non-None here; the old extra
                # "size is not None" re-check was redundant.
                if size not in font_scalings:
                    raise ValueError("size is invalid")
        self._size = size

    def set_file(self, file):
        """
        Set the filename of the fontfile to use.  In this case, all
        other properties will be ignored.
        """
        self._file = file

    def set_fontconfig_pattern(self, pattern):
        """
        Set the properties by parsing a fontconfig *pattern*.

        See the documentation on `fontconfig patterns
        <http://www.fontconfig.org/fontconfig-user.html>`_.

        This support does not require fontconfig to be installed or
        support for it to be enabled.  We are merely borrowing its
        pattern syntax for use here.
        """
        for key, val in self._parse_fontconfig_pattern(pattern).items():
            if isinstance(val, list):
                getattr(self, "set_" + key)(val[0])
            else:
                getattr(self, "set_" + key)(val)

    def copy(self):
        """Return a deep copy of self"""
        return FontProperties(_init = self)
def ttfdict_to_fnames(d):
    """
    Flatten a nested ttf dictionary into the list of filenames it
    contains.

    The dictionary is nested by name, style, variant, weight, stretch
    and size, with the filename as the innermost value.
    """
    return [fname
            for named in d.values()
            for styled in named.values()
            for variantd in styled.values()
            for weightd in variantd.values()
            for stretchd in weightd.values()
            for fname in stretchd.values()]
def pickle_dump(data, filename):
    """
    Equivalent to pickle.dump(data, open(filename, 'wb')) but closes
    the file to prevent filehandle leakage.

    The file is opened in *binary* mode: pickle streams are byte data,
    and a text-mode handle corrupts them on platforms that translate
    line endings (and is rejected outright by Python 3).
    """
    with open(filename, 'wb') as fh:
        pickle.dump(data, fh)
def pickle_load(filename):
    """
    Equivalent to pickle.load(open(filename, 'rb')) but closes the
    file to prevent filehandle leakage.

    Binary mode is required for the same reason as in
    :func:`pickle_dump`: the pickle stream is byte data, not text.
    """
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
class FontManager:
    """
    On import, the :class:`FontManager` singleton instance creates a
    list of TrueType fonts based on the font properties: name, style,
    variant, weight, stretch, and size.  The :meth:`findfont` method
    does a nearest neighbor search to find the font that most closely
    matches the specification.  If no good enough match is found, a
    default font is returned.
    """
    def __init__(self, size=None, weight='normal'):
        self.__default_weight = weight
        self.default_size = size

        paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
                 os.path.join(rcParams['datapath'], 'fonts', 'afm')]

        #  Create list of font paths from the TTFPATH/AFMPATH env vars
        for pathname in ['TTFPATH', 'AFMPATH']:
            if pathname in os.environ:
                ttfpath = os.environ[pathname]

                if ttfpath.find(';') >= 0: #win32 style
                    paths.extend(ttfpath.split(';'))
                elif ttfpath.find(':') >= 0: # unix style
                    paths.extend(ttfpath.split(':'))
                else:
                    paths.append(ttfpath)

        verbose.report('font search path %s'%(str(paths)))
        #  Load TrueType fonts and create font dictionary.

        self.ttffiles = findSystemFonts(paths) + findSystemFonts()

        for fname in self.ttffiles:
            verbose.report('trying fontname %s' % fname, 'debug')
            if fname.lower().find('vera.ttf')>=0:
                self.defaultFont = fname
                break
        else:
            # use anything
            self.defaultFont = self.ttffiles[0]

        self.ttflist = createFontList(self.ttffiles)

        if rcParams['pdf.use14corefonts']:
            # Load only the 14 PDF core fonts.  These fonts do not need to be
            # embedded; every PDF viewing application is required to have them:
            # Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
            # Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
            # Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
            # ZapfDingbats.
            afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
            # Consistency fix: store on self.afmfiles (as the else-branch
            # does) instead of leaving the attribute unset on this path.
            self.afmfiles = findSystemFonts(afmpath, fontext='afm')
            self.afmlist = createFontList(self.afmfiles, fontext='afm')
        else:
            self.afmfiles = findSystemFonts(paths, fontext='afm') + \
                findSystemFonts(fontext='afm')
            self.afmlist = createFontList(self.afmfiles, fontext='afm')

        self.ttf_lookup_cache = {}
        self.afm_lookup_cache = {}

    def get_default_weight(self):
        """
        Return the default font weight.
        """
        return self.__default_weight

    def get_default_size(self):
        """
        Return the default font size.
        """
        if self.default_size is None:
            return rcParams['font.size']
        return self.default_size

    def set_default_weight(self, weight):
        """
        Set the default font weight.  The initial value is 'normal'.
        """
        self.__default_weight = weight

    def set_default_size(self, size):
        """
        Set the default font size in points.  The initial value is set
        by ``font.size`` in rc.
        """
        self.default_size = size

    def update_fonts(self, filenames):
        """
        Update the font dictionary with new font files.
        Currently not implemented.
        """
        #  !!!!  Needs implementing
        raise NotImplementedError

    # Each of the scoring functions below should return a value between
    # 0.0 (perfect match) and 1.0 (terrible match)

    def score_family(self, families, family2):
        """
        Returns a match score between the list of font families in
        *families* and the font family name *family2*.

        An exact match anywhere in the list returns 0.0.

        A match by generic font name will return 0.1.

        No match will return 1.0.
        """
        for i, family1 in enumerate(families):
            if family1.lower() in font_family_aliases:
                if family1 == 'sans':
                    # Bug fix: this used to be a comparison
                    # (family1 == 'sans-serif'), a no-op that left the
                    # 'sans' alias pointing at a nonexistent rcParam key.
                    family1 = 'sans-serif'
                options = rcParams['font.' + family1]
                if family2 in options:
                    idx = options.index(family2)
                    return 0.1 * (float(idx) / len(options))
            elif family1.lower() == family2.lower():
                return 0.0
        return 1.0

    def score_style(self, style1, style2):
        """
        Returns a match score between *style1* and *style2*.

        An exact match returns 0.0.

        A match between 'italic' and 'oblique' returns 0.1.

        No match returns 1.0.
        """
        if style1 == style2:
            return 0.0
        elif style1 in ('italic', 'oblique') and \
                style2 in ('italic', 'oblique'):
            return 0.1
        return 1.0

    def score_variant(self, variant1, variant2):
        """
        Returns a match score between *variant1* and *variant2*.

        An exact match returns 0.0, otherwise 1.0.
        """
        if variant1 == variant2:
            return 0.0
        else:
            return 1.0

    def score_stretch(self, stretch1, stretch2):
        """
        Returns a match score between *stretch1* and *stretch2*.

        The result is the absolute value of the difference between the
        CSS numeric values of *stretch1* and *stretch2*, normalized
        between 0.0 and 1.0.
        """
        try:
            stretchval1 = int(stretch1)
        except ValueError:
            stretchval1 = stretch_dict.get(stretch1, 500)
        try:
            stretchval2 = int(stretch2)
        except ValueError:
            stretchval2 = stretch_dict.get(stretch2, 500)
        return abs(stretchval1 - stretchval2) / 1000.0

    def score_weight(self, weight1, weight2):
        """
        Returns a match score between *weight1* and *weight2*.

        The result is the absolute value of the difference between the
        CSS numeric values of *weight1* and *weight2*, normalized
        between 0.0 and 1.0.
        """
        try:
            weightval1 = int(weight1)
        except ValueError:
            weightval1 = weight_dict.get(weight1, 500)
        try:
            weightval2 = int(weight2)
        except ValueError:
            weightval2 = weight_dict.get(weight2, 500)
        return abs(weightval1 - weightval2) / 1000.0

    def score_size(self, size1, size2):
        """
        Returns a match score between *size1* and *size2*.

        If *size2* (the size specified in the font file) is 'scalable', this
        function always returns 0.0, since any font size can be generated.

        Otherwise, the result is the absolute distance between *size1* and
        *size2*, normalized so that the usual range of font sizes (6pt -
        72pt) will lie between 0.0 and 1.0.
        """
        if size2 == 'scalable':
            return 0.0
        # A relative size should normally already have been resolved to
        # points; handle it here defensively anyway.
        try:
            sizeval1 = float(size1)
        except ValueError:
            # Bug fix: font_scalings is a dict and used to be *called*
            # (font_scalings(size1)), which always raised TypeError for
            # relative sizes; index it instead.
            sizeval1 = self.default_size * font_scalings[size1]
        try:
            sizeval2 = float(size2)
        except ValueError:
            return 1.0
        return abs(sizeval1 - sizeval2) / 72.0

    def findfont(self, prop, fontext='ttf'):
        """
        Search the font list for the font that most closely matches
        the :class:`FontProperties` *prop*.

        :meth:`findfont` performs a nearest neighbor search.  Each
        font is given a similarity score to the target font
        properties.  The first font with the highest score is
        returned.  If no matches below a certain threshold are found,
        the default font (usually Vera Sans) is returned.

        The result is cached, so subsequent lookups don't have to
        perform the O(n) nearest neighbor search.

        See the `W3C Cascading Style Sheet, Level 1
        <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
        for a description of the font finding algorithm.
        """
        if prop is None:
            return self.defaultFont

        if is_string_like(prop):
            prop = FontProperties(prop)

        fname = prop.get_file()
        if fname is not None:
            verbose.report('findfont returning %s'%fname, 'debug')
            return fname

        if fontext == 'afm':
            font_cache = self.afm_lookup_cache
            fontlist = self.afmlist
        else:
            font_cache = self.ttf_lookup_cache
            fontlist = self.ttflist

        cached = font_cache.get(hash(prop))
        if cached:
            return cached

        best_score = 1e64
        best_font = None

        for font in fontlist:
            # Matching family should have highest priority, so it is multiplied
            # by 10.0
            score = \
                self.score_family(prop.get_family(), font.name) * 10.0 + \
                self.score_style(prop.get_style(), font.style) + \
                self.score_variant(prop.get_variant(), font.variant) + \
                self.score_weight(prop.get_weight(), font.weight) + \
                self.score_stretch(prop.get_stretch(), font.stretch) + \
                self.score_size(prop.get_size(), font.size)

            if score < best_score:
                best_score = score
                best_font = font

            if score == 0:
                break

        if best_font is None or best_score >= 10.0:
            verbose.report('findfont: Could not match %s. Returning %s' %
                           (prop, self.defaultFont))
            result = self.defaultFont
        else:
            verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
                           (prop, best_font.name, best_font.fname, best_score))
            result = best_font.fname

        font_cache[hash(prop)] = result
        return result
# Per-filename cache for is_opentype_cff_font results.
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
    """
    Returns True if the given font is a Postscript Compact Font Format
    Font embedded in an OpenType wrapper.  Used by the PostScript and
    PDF backends that can not subset these fonts.

    Only files with an ``.otf`` extension are inspected; the result is
    cached per filename.
    """
    if os.path.splitext(filename)[1].lower() == '.otf':
        result = _is_opentype_cff_font_cache.get(filename)
        if result is None:
            # 'with' guarantees the handle is closed even if read fails.
            with open(filename, 'rb') as fd:
                tag = fd.read(4)
            # Compare against a bytes literal: the file is opened in
            # binary mode, so read() returns bytes and comparing with
            # the str 'OTTO' is always False on Python 3.
            result = (tag == b'OTTO')
            _is_opentype_cff_font_cache[filename] = result
        return result
    return False
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
    import re

    def fc_match(pattern, fontext):
        """Resolve *pattern* with the external ``fc-match`` tool and
        return the first matching file with an acceptable extension."""
        import commands
        fontexts = get_fontext_synonyms(fontext)
        status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
        if status == 0:
            for match in _fc_match_regex.finditer(output):
                file = match.group(1)
                if os.path.splitext(file)[1][1:] in fontexts:
                    return file
        return None

    _fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
    _fc_match_cache = {}

    def findfont(prop, fontext='ttf'):
        if not is_string_like(prop):
            prop = prop.get_fontconfig_pattern()
        cached = _fc_match_cache.get(prop)
        if cached is not None:
            return cached

        result = fc_match(prop, fontext)
        if result is None:
            # Fall back to fontconfig's own default match.
            result = fc_match(':', fontext)

        _fc_match_cache[prop] = result
        return result

else:
    _fmcache = os.path.join(get_configdir(), 'fontList.cache')

    fontManager = None

    def _rebuild():
        """Rebuild the global FontManager and persist it to the cache."""
        global fontManager
        fontManager = FontManager()
        pickle_dump(fontManager, _fmcache)
        verbose.report("generated new fontManager")

    try:
        fontManager = pickle_load(_fmcache)
        fontManager.default_size = None
        verbose.report("Using fontManager instance from %s" % _fmcache)
    except Exception:
        # A missing, stale or corrupt cache of any kind just triggers a
        # rebuild.  A bare "except:" here would also trap
        # KeyboardInterrupt/SystemExit raised during import.
        _rebuild()

    def findfont(prop, **kw):
        global fontManager
        font = fontManager.findfont(prop, **kw)
        if not os.path.exists(font):
            # The pickled cache may reference fonts that have since been
            # removed from the system; rebuild and retry once.
            verbose.report("%s returned by pickled fontManager does not exist" % font)
            _rebuild()
            font = fontManager.findfont(prop, **kw)
        return font
| agpl-3.0 |
ilo10/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
# Public estimators exported by this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]

# =============================================================================
# Types and constants
# =============================================================================

# dtypes expected by the Cython tree code: DTYPE for the input X,
# DOUBLE for y and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Mapping from the criterion names accepted by the estimators'
# constructors to the corresponding Cython implementations.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}

# Splitter implementations, selected by name and by input sparsity.
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
                   "presort-best": _tree.PresortBestSplitter,
                   "random": _tree.RandomSplitter}

SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
                    "random": _tree.RandomSparseSplitter}
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        # Hyper-parameters are stored verbatim; validation is deferred
        # to fit() (scikit-learn convention, so get_params/set_params
        # round-trip without side effects).
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight

        # Fitted state; None until fit() runs.  _validate_X_predict
        # checks tree_ against None to detect an unfitted estimator.
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None

        self.tree_ = None
        self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
    """Check that X is usable for prediction and return it, converted.

    Raises if the estimator is unfitted, if a sparse input uses 64-bit
    indices, or if the number of columns differs from the fitted model.
    """
    if self.tree_ is None:
        raise NotFittedError("Estimator not fitted, "
                             "call `fit` before exploiting the model.")

    if check_input:
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        bad_index_dtype = issparse(X) and (X.indices.dtype != np.intc or
                                           X.indptr.dtype != np.intc)
        if bad_index_dtype:
            raise ValueError("No support for np.int64 index based "
                             "sparse matrices")

    n_input_features = X.shape[1]
    if n_input_features != self.n_features_:
        raise ValueError("Number of features of the model must "
                         " match the input. Model n_features is %s and "
                         " input n_features is %s "
                         % (self.n_features_, n_input_features))

    return X
def predict(self, X, check_input=True):
    """Predict class or regression value for X.

    For a classification model, the predicted class for each sample in X
    is returned.  For a regression model, the predicted value based on X
    is returned.

    Parameters
    ----------
    X : array-like or sparse matrix of shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    y : array of shape = [n_samples] or [n_samples, n_outputs]
        The predicted classes, or the predict values.
    """
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)
    n_samples = X.shape[0]

    if isinstance(self, ClassifierMixin):
        # Classification: `proba` holds per-class scores; translate the
        # argmax of each row back into the original class labels.
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        predictions = np.zeros((n_samples, self.n_outputs_))
        for output in range(self.n_outputs_):
            best = np.argmax(proba[:, output], axis=1)
            predictions[:, output] = self.classes_[output].take(best,
                                                                axis=0)
        return predictions

    # Regression: the tree stores the predicted values directly.
    if self.n_outputs_ == 1:
        return proba[:, 0]
    return proba[:, :, 0]
def apply(self, X, check_input=True):
    """
    Returns the index of the leaf that each sample is predicted as.

    Parameters
    ----------
    X : array_like or sparse matrix, shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    check_input : boolean, (default=True)
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you do.

    Returns
    -------
    X_leaves : array_like, shape = [n_samples,]
        For each datapoint x in X, return the index of the leaf x
        ends up in. Leaves are numbered within
        ``[0; self.tree_.node_count)``, possibly with gaps in the
        numbering.
    """
    checked_X = self._validate_X_predict(X, check_input)
    return self.tree_.apply(checked_X)
@property
def feature_importances_(self):
    """Return the feature importances.

    The importance of a feature is computed as the (normalized) total
    reduction of the criterion brought by that feature.  It is also
    known as the Gini importance.

    Returns
    -------
    feature_importances_ : array, shape = [n_features]

    Raises
    ------
    NotFittedError
        If accessed before ``fit`` has been called (``tree_`` is None).
    """
    if self.tree_ is None:
        raise NotFittedError("Estimator not fitted, call `fit` before"
                             " `feature_importances_`.")

    # The heavy lifting is done in the Cython Tree object.
    return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    class_weight : dict, list of dicts, "balanced" or None, optional
        (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # All hyper-parameter handling and fitting is delegated to
        # BaseDecisionTree; this class only fixes the classification
        # criteria and adds probability prediction.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            # Normalize per-leaf class counts to probabilities; rows
            # with a zero total are left all-zero (divide by 1).
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # All hyper-parameter handling and fitting is delegated to
        # BaseDecisionTree; this class only fixes the regression
        # criterion default and drops class_weight.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are
    built.  When looking for the best split to separate the samples of a
    node into two groups, random splits are drawn for each of the
    `max_features` randomly selected features and the best split among
    those is chosen.  When `max_features` is set 1, this amounts to
    building a totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Everything is handled by DecisionTreeClassifier; only the
        # defaults differ (random splitter, max_features="auto").
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are
    built.  When looking for the best split to separate the samples of a
    node into two groups, random splits are drawn for each of the
    `max_features` randomly selected features and the best split among
    those is chosen.  When `max_features` is set 1, this amounts to
    building a totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Everything is handled by DecisionTreeRegressor; only the
        # defaults differ (random splitter, max_features="auto").
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes)
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/genmod/cov_struct.py | 19 | 46892 | from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
"""
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
class CovStruct(object):
    """
    A base class for correlation and covariance structures of grouped
    data.

    Each implementation of this class takes the residuals from a
    regression model that has been fitted to grouped data, and uses
    them to estimate the within-group dependence structure of the
    random errors in the model.

    The state of the covariance structure is represented through the
    value of the class variable `dep_params`.  The default state of a
    newly-created instance should correspond to the identity
    correlation matrix.
    """

    def __init__(self, cov_nearest_method="clipped"):

        # Parameters describing the dependency structure
        self.dep_params = None

        # Keep track of the number of times that the covariance was
        # adjusted.
        self.cov_adjust = []

        # Method for projecting the covariance matrix if it is not SPD.
        self.cov_nearest_method = cov_nearest_method

    def initialize(self, model):
        """
        Called by GEE, used by implementations that need additional
        setup prior to running `fit`.

        Parameters
        ----------
        model : GEE class
            A reference to the parent GEE class instance.
        """
        self.model = model

    def update(self, params):
        """
        Updates the association parameter values based on the current
        regression coefficients.

        Parameters
        ----------
        params : array-like
            Working values for the regression parameters.
        """
        raise NotImplementedError

    def covariance_matrix(self, endog_expval, index):
        """
        Returns the working covariance or correlation matrix for a
        given cluster of data.

        Parameters
        ----------
        endog_expval: array-like
           The expected values of endog for the cluster for which the
           covariance or correlation matrix will be returned
        index: integer
           The index of the cluster for which the covariane or
           correlation matrix will be returned

        Returns
        -------
        M: matrix
            The covariance or correlation matrix of endog
        is_cor: bool
            True if M is a correlation matrix, False if M is a
            covariance matrix
        """
        raise NotImplementedError

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        """
        Solves matrix equations of the form `covmat * soln = rhs` and
        returns the values of `soln`, where `covmat` is the covariance
        matrix represented by this class.

        Parameters
        ----------
        expval: array-like
           The expected value of endog for each observed value in the
           group.
        index: integer
           The group index.
        stdev : array-like
            The standard deviation of endog for each observation in
            the group.
        rhs : list/tuple of array-like
            A set of right-hand sides; each defines a matrix equation
            to be solved.

        Returns
        -------
        soln : list/tuple of array-like
            The solutions to the matrix equations.

        Notes
        -----
        Returns None if the solver fails.

        Some dependence structures do not use `expval` and/or `index`
        to determine the correlation matrix.  Some families
        (e.g. binomial) do not use the `stdev` parameter when forming
        the covariance matrix.

        If the covariance matrix is singular or not SPD, it is
        projected to the nearest such matrix.  These projection events
        are recorded in the fit_history member of the GEE model.

        Systems of linear equations with the covariance matrix as the
        left hand side (LHS) are solved for different right hand sides
        (RHS); the LHS is only factorized once to save time.

        This is a default implementation, it can be reimplemented in
        subclasses to optimize the linear algebra according to the
        structure of the covariance matrix.
        """

        vmat, is_cor = self.covariance_matrix(expval, index)
        if is_cor:
            vmat *= np.outer(stdev, stdev)

        # Factor the covariance matrix.  If the factorization fails,
        # attempt to condition it into a factorizable matrix.
        threshold = 1e-2
        success = False
        cov_adjust = 0
        for _ in range(20):
            try:
                vco = spl.cho_factor(vmat)
                success = True
                break
            except np.linalg.LinAlgError:
                # Project to the nearest SPD matrix, doubling the
                # conditioning threshold on each failed attempt.
                vmat = cov_nearest(vmat, method=self.cov_nearest_method,
                                   threshold=threshold)
                threshold *= 2
                cov_adjust += 1

        self.cov_adjust.append(cov_adjust)

        # Last resort if we still can't factor the covariance matrix.
        if not success:
            warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest",
                          ConvergenceWarning)
            vmat = np.diag(np.diag(vmat))
            vco = spl.cho_factor(vmat)

        soln = [spl.cho_solve(vco, x) for x in rhs]
        return soln

    def summary(self):
        """
        Returns a text summary of the current estimate of the
        dependence structure.
        """
        raise NotImplementedError
class Independence(CovStruct):
    """
    A working dependence structure under which observations within a
    cluster are treated as uncorrelated.
    """

    def update(self, params):
        # The identity structure has no association parameters.
        return

    def covariance_matrix(self, expval, index):
        # The working correlation matrix is the identity.
        return np.eye(len(expval), dtype=np.float64), True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        # The covariance matrix is diag(stdev**2), so each solve is an
        # elementwise division by the variances.
        variances = stdev ** 2
        solutions = []
        for mat in rhs:
            divisor = variances if mat.ndim == 1 else variances[:, None]
            solutions.append(mat / divisor)
        return solutions

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        return "Observations within a cluster are modeled as being independent."
class Exchangeable(CovStruct):
    """
    An exchangeable working dependence structure.

    Every pair of distinct observations within a cluster is assumed to
    share a single common correlation, held in `dep_params`.
    """

    def __init__(self):

        super(Exchangeable, self).__init__()

        # The correlation between any two values in the same cluster
        self.dep_params = 0.

    def update(self, params):

        endog = self.model.endog_li

        nobs = self.model.nobs

        varfunc = self.model.family.variance

        cached_means = self.model.cached_means

        has_weights = self.model.weights is not None
        weights_li = self.model.weights

        # residsq_sum accumulates (weighted) cross products of distinct
        # pairs of standardized residuals within each group; scale
        # accumulates the (weighted) sum of squared residuals.
        residsq_sum, scale = 0, 0
        fsum1, fsum2, n_pairs = 0., 0., 0.
        for i in range(self.model.num_group):
            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev
            f = weights_li[i] if has_weights else 1.

            ngrp = len(resid)
            residsq = np.outer(resid, resid)
            scale += f * np.trace(residsq)
            fsum1 += f * len(endog[i])

            # Keep only the strictly lower triangle so each distinct
            # within-group pair is counted once.
            residsq = np.tril(residsq, -1)
            residsq_sum += f * residsq.sum()
            npr = 0.5 * ngrp * (ngrp - 1)
            fsum2 += f * npr
            n_pairs += npr

        # Degrees-of-freedom corrected moment estimates of the scale
        # parameter and the common correlation.
        ddof = self.model.ddof_scale
        scale /= (fsum1 * (nobs - ddof) / float(nobs))
        residsq_sum /= scale
        self.dep_params = residsq_sum / (fsum2 * (n_pairs - ddof) /
                                         float(n_pairs))

    def covariance_matrix(self, expval, index):
        # Constant off-diagonal correlation, unit diagonal.
        dim = len(expval)
        dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
        np.fill_diagonal(dp, 1)
        return dp, True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):

        # Closed-form solve using the structure of the exchangeable
        # correlation matrix R = (1-r)*I + r*1*1' (Sherman-Morrison
        # form), avoiding an explicit factorization.
        k = len(expval)
        c = self.dep_params / (1. - self.dep_params)
        c /= 1. + self.dep_params * (k - 1)

        rslt = []
        for x in rhs:
            if x.ndim == 1:
                x1 = x / stdev
                y = x1 / (1. - self.dep_params)
                y -= c * sum(x1)
                y /= stdev
            else:
                x1 = x / stdev[:, None]
                y = x1 / (1. - self.dep_params)
                y -= c * x1.sum(0)
                y /= stdev[:, None]
            rslt.append(y)

        return rslt

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        return ("The correlation between two observations in the " +
                "same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
    """
    A nested working dependence structure.

    A working dependence structure that captures a nested hierarchy of
    groups, each level of which contributes to the random error term
    of the model.

    When using this working covariance structure, `dep_data` of the
    GEE instance should contain a n_obs x k matrix of 0/1 indicators,
    corresponding to the k subgroups nested under the top-level
    `groups` of the GEE instance.  These subgroups should be nested
    from left to right, so that two observations with the same value
    for column j of `dep_data` should also have the same value for all
    columns j' < j (this only applies to observations in the same
    top-level cluster given by the `groups` argument to GEE).

    Examples
    --------
    Suppose our data are student test scores, and the students are in
    classrooms, nested in schools, nested in school districts.  The
    school district is the highest level of grouping, so the school
    district id would be provided to GEE as `groups`, and the school
    and classroom id's would be provided to the Nested class as the
    `dep_data` argument, e.g.

        0 0  # School 0, classroom 0, student 0
        0 0  # School 0, classroom 0, student 1
        0 1  # School 0, classroom 1, student 0
        0 1  # School 0, classroom 1, student 1
        1 0  # School 1, classroom 0, student 0
        1 0  # School 1, classroom 0, student 1
        1 1  # School 1, classroom 1, student 0
        1 1  # School 1, classroom 1, student 1

    Labels lower in the hierarchy are recycled, so that student 0 in
    classroom 0 is different from student 0 in classroom 1, etc.

    Notes
    -----
    The calculations for this dependence structure involve all pairs
    of observations within a group (that is, within the top level
    `group` structure passed to GEE).  Large group sizes will result
    in slow iterations.

    The variance components are estimated using least squares
    regression of the products r*r', for standardized residuals r and
    r' in the same group, on a vector of indicators defining which
    variance components are shared by r and r'.
    """

    def initialize(self, model):
        """
        Called on the first call to update

        `ilabels` is a list of n_i x n_i matrices containing integer
        labels that correspond to specific correlation parameters.
        Two elements of ilabels[i] with the same label share identical
        variance components.

        `designx` is a matrix, with each row containing dummy
        variables indicating which variance components are associated
        with the corresponding element of QY.
        """
        super(Nested, self).initialize(model)

        if self.model.weights is not None:
            warnings.warn("weights not implemented for nested cov_struct, using unweighted covariance estimate")

        # A bit of processing of the nest data
        id_matrix = np.asarray(self.model.dep_data)
        if id_matrix.ndim == 1:
            id_matrix = id_matrix[:, None]
        self.id_matrix = id_matrix

        endog = self.model.endog_li
        designx, ilabels = [], []

        # The number of layers of nesting
        n_nest = self.id_matrix.shape[1]

        for i in range(self.model.num_group):
            ngrp = len(endog[i])
            glab = self.model.group_labels[i]
            rix = self.model.group_indices[glab]

            # Determine the number of common variance components
            # shared by each pair of observations.
            ix1, ix2 = np.tril_indices(ngrp, -1)
            ncm = (self.id_matrix[rix[ix1], :] ==
                   self.id_matrix[rix[ix2], :]).sum(1)

            # This is used to construct the working correlation
            # matrix.  Index with a tuple of arrays; indexing with a
            # list of index arrays is deprecated/removed in modern
            # numpy, while the tuple form has identical semantics.
            ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
            ilabel[ix1, ix2] = ncm + 1
            ilabel[ix2, ix1] = ncm + 1
            ilabels.append(ilabel)

            # This is used to estimate the variance components.
            dsx = np.zeros((len(ix1), n_nest + 1), dtype=np.float64)
            dsx[:, 0] = 1
            for k in np.unique(ncm):
                ii = np.flatnonzero(ncm == k)
                dsx[ii, 1:k + 1] = 1
            designx.append(dsx)

        self.designx = np.concatenate(designx, axis=0)
        self.ilabels = ilabels

        svd = np.linalg.svd(self.designx, 0)
        self.designx_u = svd[0]
        self.designx_s = svd[1]
        self.designx_v = svd[2].T

    def update(self, params):

        endog = self.model.endog_li

        nobs = self.model.nobs
        dim = len(params)

        # NOTE(review): `_compute_design` is not defined anywhere on
        # this class, so this branch would raise if reached.  In
        # practice `initialize` always sets `designx` first; confirm
        # before relying on this fallback.
        if self.designx is None:
            self._compute_design(self.model)

        cached_means = self.model.cached_means

        varfunc = self.model.family.variance

        # Collect pairwise residual products and the total sum of
        # squared standardized residuals across groups.
        dvmat = []
        scale = 0.
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]

            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            ix1, ix2 = np.tril_indices(len(resid), -1)
            dvmat.append(resid[ix1] * resid[ix2])

            scale += np.sum(resid ** 2)

        dvmat = np.concatenate(dvmat)
        scale /= (nobs - dim)

        # Use least squares regression to estimate the variance
        # components, via the precomputed SVD of the design matrix.
        vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
                             dvmat) / self.designx_s)

        self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
        self.scale = scale

        self.dep_params = self.vcomp_coeff.copy()

    def covariance_matrix(self, expval, index):

        dim = len(expval)

        # First iteration: no estimates yet, use the identity.
        if self.dep_params is None:
            return np.eye(dim, dtype=np.float64), True

        ilabel = self.ilabels[index]

        # Cumulative variance component sums indexed by the number of
        # shared nesting levels, scaled to a correlation matrix.
        c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
        vmat = c[ilabel]
        vmat /= self.scale
        return vmat, True

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__

    def summary(self):
        """
        Returns a summary string describing the state of the
        dependence structure.
        """

        msg = "Variance estimates\n------------------\n"
        for k in range(len(self.vcomp_coeff)):
            msg += "Component %d: %.3f\n" % (k + 1, self.vcomp_coeff[k])
        msg += "Residual: %.3f\n" % (self.scale -
                                     np.sum(self.vcomp_coeff))
        return msg
class Stationary(CovStruct):
    """
    A stationary covariance structure.

    The correlation between two observations is an arbitrary function
    of the distance between them.  Distances up to a given maximum
    value are included in the covariance model.

    Parameters
    ----------
    max_lag : float
        The largest distance that is included in the covariance model.
    grid : bool
        If True, the index positions in the data (after dropping missing
        values) are used to define distances, and the `time` variable is
        ignored.
    """

    def __init__(self, max_lag=1, grid=False):
        super(Stationary, self).__init__()
        self.max_lag = max_lag
        self.grid = grid
        # One correlation parameter for each lag 1..max_lag.
        self.dep_params = np.zeros(max_lag)

    def initialize(self, model):
        super(Stationary, self).initialize(model)

        if not self.grid:
            # Time used as an index needs to be integer type.
            time = self.model.time[:, 0].astype(np.int32)
            self.time = self.model.cluster_list(time)

    def update(self, params):
        # Dispatch to the appropriate estimator depending on whether
        # observations are on a regular grid.
        if self.grid:
            self.update_grid(params)
        else:
            self.update_nogrid(params)

    def update_grid(self, params):
        # On a grid the lag between positions i and j is |i - j|, so the
        # lagged residual products can be accumulated directly.
        endog = self.model.endog_li
        cached_means = self.model.cached_means
        varfunc = self.model.family.variance

        dep_params = np.zeros(self.max_lag + 1)
        for i in range(self.model.num_group):
            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            # Lag 0 accumulates the (scaled) residual variance.
            dep_params[0] += np.sum(resid * resid) / len(resid)
            for j in range(1, self.max_lag + 1):
                dep_params[j] += np.sum(resid[0:-j] * resid[j:]) / len(resid[j:])

        # Normalize by the lag-0 term so dep_params holds correlations.
        self.dep_params = dep_params[1:] / dep_params[0]

    def update_nogrid(self, params):
        endog = self.model.endog_li
        cached_means = self.model.cached_means
        varfunc = self.model.family.variance

        dep_params = np.zeros(self.max_lag + 1)
        # dn[k] counts the clusters contributing at least one pair at lag k.
        dn = np.zeros(self.max_lag + 1)
        for i in range(self.model.num_group):
            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            # All within-cluster pairs, keeping only those whose time
            # separation does not exceed max_lag.
            j1, j2 = np.tril_indices(len(expval))
            dx = np.abs(self.time[i][j1] - self.time[i][j2])
            ii = np.flatnonzero(dx <= self.max_lag)
            j1 = j1[ii]
            j2 = j2[ii]
            dx = dx[ii]

            # Per-lag sums and counts of residual products for this cluster.
            vs = np.bincount(dx, weights=resid[j1] * resid[j2],
                             minlength=self.max_lag + 1)
            vd = np.bincount(dx, minlength=self.max_lag + 1)

            ii = np.flatnonzero(vd > 0)
            dn[ii] += 1
            if len(ii) > 0:
                dep_params[ii] += vs[ii] / vd[ii]

        # Average over clusters, then normalize by the lag-0 variance term.
        dep_params /= dn
        self.dep_params = dep_params[1:] / dep_params[0]

    def covariance_matrix(self, endog_expval, index):
        if self.grid:
            # Fixed: this previously referenced `endog_expal` (a typo),
            # raising NameError whenever grid=True.
            return self.covariance_matrix_grid(endog_expval, index)

        # Fill only pairs within max_lag; everything else stays zero
        # (diagonal is 1 from np.eye).
        j1, j2 = np.tril_indices(len(endog_expval))
        dx = np.abs(self.time[index][j1] - self.time[index][j2])
        ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
        j1 = j1[ii]
        j2 = j2[ii]
        dx = dx[ii]

        cmat = np.eye(len(endog_expval))
        cmat[j1, j2] = self.dep_params[dx - 1]
        cmat[j2, j1] = self.dep_params[dx - 1]
        return cmat, True

    def covariance_matrix_grid(self, endog_expval, index):
        # On a grid the correlation matrix is Toeplitz in the lag.
        from scipy.linalg import toeplitz
        r = np.zeros(len(endog_expval))
        r[0] = 1
        r[1:self.max_lag + 1] = self.dep_params
        return toeplitz(r), True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        if not self.grid:
            # No special structure off-grid; defer to the generic solver.
            return super(Stationary, self).covariance_matrix_solve(
                expval, index, stdev, rhs)

        from statsmodels.tools.linalg import stationary_solve
        r = np.zeros(len(expval))
        r[0:self.max_lag] = self.dep_params
        return [stationary_solve(r, x) for x in rhs]

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        # Fixed: previously returned a (string, array) tuple because of a
        # stray comma; return a single string for consistency with the
        # other covariance structures' summary() methods.
        return ("Stationary dependence parameters\n%s" %
                self.dep_params)
class Autoregressive(CovStruct):
    """
    A first-order autoregressive working dependence structure.

    The dependence is defined in terms of the `time` component of the
    parent GEE class, which defaults to the index position of each
    value within its cluster, based on the order of values in the
    input data set.  Time represents a potentially multidimensional
    index from which distances between pairs of observations can be
    determined.

    The correlation between two observations in the same cluster is
    dep_params^distance, where `dep_params` contains the (scalar)
    autocorrelation parameter to be estimated, and `distance` is the
    distance between the two observations, calculated from their
    corresponding time values.  `time` is stored as an n_obs x k
    matrix, where `k` represents the number of dimensions in the time
    index.

    The autocorrelation parameter is estimated using weighted
    nonlinear least squares, regressing each value within a cluster on
    each preceding value in the same cluster.

    Parameters
    ----------
    dist_func: function from R^k x R^k to R^+, optional
        A function that computes the distance between the two
        observations based on their `time` values.

    References
    ----------
    B Rosner, A Munoz.  Autoregressive modeling for the analysis of
    longitudinal data with unequally spaced examinations.  Statistics
    in medicine.  Vol 7, 59-71, 1988.
    """

    def __init__(self, dist_func=None):
        super(Autoregressive, self).__init__()

        # The function for determining distances based on time;
        # defaults to the L1 (city block) distance between time vectors.
        if dist_func is None:
            self.dist_func = lambda x, y: np.abs(x - y).sum()
        else:
            self.dist_func = dist_func

        # Cache of within-cluster pairwise distances; built once on the
        # first call to `update` and reused afterwards.
        self.designx = None

        # The autocorrelation parameter
        self.dep_params = 0.

    def update(self, params):

        if self.model.weights is not None:
            warnings.warn("weights not implemented for autoregressive cov_struct, using unweighted covariance estimate")

        endog = self.model.endog_li
        time = self.model.time_li

        # Only need to compute this once
        if self.designx is not None:
            designx = self.designx
        else:
            designx = []
            for i in range(self.model.num_group):

                ngrp = len(endog[i])
                if ngrp == 0:
                    continue

                # Loop over pairs of observations within a cluster
                for j1 in range(ngrp):
                    for j2 in range(j1):
                        designx.append(self.dist_func(time[i][j1, :],
                                                      time[i][j2, :]))

            designx = np.array(designx)
            self.designx = designx

        scale = self.model.estimate_scale()
        varfunc = self.model.family.variance
        cached_means = self.model.cached_means

        # Weights: inverse of the variance of the lagged-regression
        # residual under an AR(1) model with parameter dep_params.
        var = 1. - self.dep_params**(2*designx)
        var /= 1. - self.dep_params**2
        wts = 1. / var
        wts /= wts.sum()

        # Collect all within-cluster ordered residual pairs; row k of
        # residmat pairs with distance designx[k].
        residmat = []
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]
            stdev = np.sqrt(scale * varfunc(expval))
            resid = (endog[i] - expval) / stdev

            ngrp = len(resid)
            for j1 in range(ngrp):
                for j2 in range(j1):
                    residmat.append([resid[j1], resid[j2]])

        residmat = np.array(residmat)

        # Need to minimize this: weighted squared error of regressing
        # each residual on its earlier partner scaled by a**distance.
        def fitfunc(a):
            dif = residmat[:, 0] - (a**designx)*residmat[:, 1]
            return np.dot(dif**2, wts)

        # Bracket a minimum of fitfunc in [0, 1) for Brent's method.
        # Left bracket point
        b_lft, f_lft = 0., fitfunc(0.)

        # Center bracket point: shrink toward 0 until the center value
        # is below the left value; give up (dep_params = 0) if the
        # center collapses to (numerically) zero.
        b_ctr, f_ctr = 0.5, fitfunc(0.5)
        while f_ctr > f_lft:
            b_ctr /= 2
            f_ctr = fitfunc(b_ctr)
            if b_ctr < 1e-8:
                self.dep_params = 0
                return

        # Right bracket point: move toward 1 until the function turns
        # upward again.
        b_rgt, f_rgt = 0.75, fitfunc(0.75)
        while f_rgt < f_ctr:
            b_rgt = b_rgt + (1. - b_rgt) / 2
            f_rgt = fitfunc(b_rgt)
            if b_rgt > 1. - 1e-6:
                raise ValueError(
                    "Autoregressive: unable to find right bracket")

        from scipy.optimize import brent
        self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])

    def covariance_matrix(self, endog_expval, index):
        # NOTE(review): lags here are index positions within the cluster,
        # not distances from `time`/`dist_func`; distances enter only via
        # `update`.  Confirm this asymmetry is intended.
        ngrp = len(endog_expval)
        if self.dep_params == 0:
            return np.eye(ngrp, dtype=np.float64), True
        idx = np.arange(ngrp)
        cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :])
        return cmat, True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        # The inverse of an AR(1) covariance matrix is tri-diagonal.

        k = len(expval)
        soln = []

        # LHS has 1 column
        if k == 1:
            return [x / stdev**2 for x in rhs]

        # LHS has 2 columns: invert the 2x2 correlation matrix directly.
        if k == 2:
            mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
            mat /= (1. - self.dep_params**2)
            for x in rhs:
                # Scale by stdev on both sides: solve D^-1 R^-1 D^-1 x.
                if x.ndim == 1:
                    x1 = x / stdev
                else:
                    x1 = x / stdev[:, None]
                x1 = np.dot(mat, x1)
                if x.ndim == 1:
                    x1 /= stdev
                else:
                    x1 /= stdev[:, None]
                soln.append(x1)
            return soln

        # LHS has >= 3 columns: values c0, c1, c2 defined below give
        # the inverse.  c0 is on the diagonal, except for the first
        # and last position.  c1 is on the first and last position of
        # the diagonal.  c2 is on the sub/super diagonal.
        c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2)
        c1 = 1. / (1. - self.dep_params**2)
        c2 = -self.dep_params / (1. - self.dep_params**2)
        soln = []
        for x in rhs:
            # Promote 1-d right-hand sides to a single column, and
            # remember to squeeze the result back at the end.
            flatten = False
            if x.ndim == 1:
                x = x[:, None]
                flatten = True
            x1 = x / stdev[:, None]

            # Apply the tri-diagonal inverse via shifted copies of x.
            z0 = np.zeros((1, x.shape[1]))
            rhs1 = np.concatenate((x[1:,:], z0), axis=0)
            rhs2 = np.concatenate((z0, x[0:-1,:]), axis=0)

            y = c0*x + c2*rhs1 + c2*rhs2
            y[0, :] = c1*x[0, :] + c2*x[1, :]
            y[-1, :] = c1*x[-1, :] + c2*x[-2, :]

            y /= stdev[:, None]

            if flatten:
                y = np.squeeze(y)

            soln.append(y)

        return soln

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        # Human-readable one-line description of the fitted parameter.
        return ("Autoregressive(1) dependence parameter: %.3f\n" %
                self.dep_params)
class CategoricalCovStruct(CovStruct):
    """
    Parent class for covariance structure for categorical data models.

    Attributes
    ----------
    nlevel : int
        The number of distinct levels for the outcome variable.
    ibd : list
        A list whose i^th element ibd[i] is an array whose rows
        contain integer pairs (a,b), where endog_li[i][a:b] is the
        subvector of binary indicators derived from the same ordinal
        value.
    """

    def initialize(self, model):
        super(CategoricalCovStruct, self).initialize(model)

        self.nlevel = len(model.endog_values)
        self._ncut = self.nlevel - 1

        # For each group, pair up consecutive cut points so row k of the
        # result is (k*_ncut, (k+1)*_ncut) -- the index range of the
        # binary indicators belonging to observation k.
        ibd = []
        for grp in model.endog_li:
            bounds = np.arange(0, len(grp) + 1, self._ncut, dtype=np.int64)
            ibd.append(np.column_stack((bounds[:-1], bounds[1:])))

        self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
    """
    Estimate the global odds ratio for a GEE with ordinal or nominal
    data.

    References
    ----------
    PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
    Ordinal Measurements". Journal of the American Statistical
    Association Vol. 91, Issue 435 (1996).

    Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
    A Note on Working Correlation Structures. Biometrics Vol. 52,
    No. 1 (Mar., 1996), pp. 354-361
    http://www.jstor.org/stable/2533173

    Notes
    -----
    The following data structures are calculated in the class:

    'ibd' is a list whose i^th element ibd[i] is a sequence of integer
    pairs (a,b), where endog_li[i][a:b] is the subvector of binary
    indicators derived from the same ordinal value.

    `cpp` is a dictionary where cpp[group] is a map from cut-point
    pairs (c,c') to the indices of all between-subject pairs derived
    from the given cut points.
    """

    def __init__(self, endog_type):
        super(GlobalOddsRatio, self).__init__()
        # "ordinal" or "nominal"; controls how the within-observation
        # joint probabilities are formed in `get_eyy`.
        self.endog_type = endog_type
        self.dep_params = 0.

    def initialize(self, model):

        super(GlobalOddsRatio, self).initialize(model)

        if self.model.weights is not None:
            warnings.warn("weights not implemented for GlobalOddsRatio cov_struct, using unweighted covariance estimate")

        # Need to restrict to between-subject pairs
        cpp = []
        for v in model.endog_li:

            # Number of subjects in this group
            m = int(len(v) / self._ncut)
            i1, i2 = np.tril_indices(m, -1)

            cpp1 = {}
            for k1 in range(self._ncut):
                for k2 in range(k1+1):
                    # Indicator indices for cut point k1 on subject i1
                    # and cut point k2 on subject i2.
                    jj = np.zeros((len(i1), 2), dtype=np.int64)
                    jj[:, 0] = i1*self._ncut + k1
                    jj[:, 1] = i2*self._ncut + k2
                    cpp1[(k2, k1)] = jj

            cpp.append(cpp1)

        self.cpp = cpp

        # Initialize the dependence parameters
        self.crude_or = self.observed_crude_oddsratio()
        if self.model.update_dep:
            self.dep_params = self.crude_or

    def pooled_odds_ratio(self, tables):
        """
        Returns the pooled odds ratio for a list of 2x2 tables.

        The pooled odds ratio is the inverse variance weighted average
        of the sample odds ratios of the tables.
        """

        if len(tables) == 0:
            return 1.

        # Get the sampled odds ratios and variances
        log_oddsratio, var = [], []
        for table in tables:
            lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
                  np.log(table[0, 1]) - np.log(table[1, 0])
            log_oddsratio.append(lor)
            # Variance of the log-OR is the sum of reciprocal cell counts.
            var.append((1 / table.astype(np.float64)).sum())

        # Calculate the inverse variance weighted average
        wts = [1 / v for v in var]
        wtsum = sum(wts)
        wts = [w / wtsum for w in wts]
        log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)])

        return np.exp(log_pooled_or)

    def covariance_matrix(self, expected_value, index):

        # Working covariance: E[YY'] - E[Y]E[Y]'.  Second return value
        # False indicates this is a covariance, not a correlation.
        vmat = self.get_eyy(expected_value, index)
        vmat -= np.outer(expected_value, expected_value)
        return vmat, False

    def observed_crude_oddsratio(self):
        """
        To obtain the crude (global) odds ratio, first pool all binary
        indicators corresponding to a given pair of cut points (c,c'),
        then calculate the odds ratio for this 2x2 table.  The crude
        odds ratio is the inverse variance weighted average of these
        odds ratios.  Since the covariate effects are ignored, this OR
        will generally be greater than the stratified OR.
        """

        cpp = self.cpp
        endog = self.model.endog_li

        # Storage for the contingency tables for each (c,c')
        tables = {}
        for ii in iterkeys(cpp[0]):
            tables[ii] = np.zeros((2, 2), dtype=np.float64)

        # Get the observed crude OR
        for i in range(len(endog)):

            # The observed joint values for the current cluster
            yvec = endog[i]
            endog_11 = np.outer(yvec, yvec)
            endog_10 = np.outer(yvec, 1. - yvec)
            endog_01 = np.outer(1. - yvec, yvec)
            endog_00 = np.outer(1. - yvec, 1. - yvec)

            cpp1 = cpp[i]
            for ky in iterkeys(cpp1):
                ix = cpp1[ky]
                tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
                tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()

        return self.pooled_odds_ratio(list(itervalues(tables)))

    def get_eyy(self, endog_expval, index):
        """
        Returns a matrix V such that V[i,j] is the joint probability
        that endog[i] = 1 and endog[j] = 1, based on the marginal
        probabilities of endog and the global odds ratio `current_or`.
        """

        current_or = self.dep_params
        ibd = self.ibd[index]

        # The between-observation joint probabilities
        if current_or == 1.0:
            # OR of 1 means independence: joint = product of marginals.
            vmat = np.outer(endog_expval, endog_expval)
        else:
            # Closed-form joint probability under a constant odds ratio
            # (quadratic formula solution).
            psum = endog_expval[:, None] + endog_expval[None, :]
            pprod = endog_expval[:, None] * endog_expval[None, :]
            pfac = np.sqrt((1. + psum * (current_or - 1.))**2 +
                           4 * current_or * (1. - current_or) * pprod)
            vmat = 1. + psum * (current_or - 1.) - pfac
            vmat /= 2. * (current_or - 1)

        # Fix E[YY'] for elements that belong to same observation
        for bdl in ibd:
            evy = endog_expval[bdl[0]:bdl[1]]
            if self.endog_type == "ordinal":
                # Cumulative indicators: P(both) = min of the marginals.
                vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
                    np.minimum.outer(evy, evy)
            else:
                # Nominal indicators are mutually exclusive off-diagonal.
                vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)

        return vmat

    def update(self, params):
        """
        Update the global odds ratio based on the current value of
        params.
        """

        endog = self.model.endog_li
        cpp = self.cpp
        cached_means = self.model.cached_means

        # This will happen if all the clusters have only
        # one observation
        if len(cpp[0]) == 0:
            return

        tables = {}
        for ii in cpp[0]:
            tables[ii] = np.zeros((2, 2), dtype=np.float64)

        for i in range(self.model.num_group):

            endog_expval, _ = cached_means[i]

            # Expected joint probabilities for the four cells of each
            # 2x2 table under the current model.
            emat_11 = self.get_eyy(endog_expval, i)
            emat_10 = endog_expval[:, None] - emat_11
            emat_01 = -emat_11 + endog_expval
            emat_00 = 1. - (emat_11 + emat_10 + emat_01)

            cpp1 = cpp[i]
            for ky in iterkeys(cpp1):
                ix = cpp1[ky]
                tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
                tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()

        cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))

        # Multiplicative update: rescale so the model-implied OR moves
        # toward the observed crude OR.
        self.dep_params *= self.crude_or / cor_expval
        if not np.isfinite(self.dep_params):
            self.dep_params = 1.
            warnings.warn("dep_params became inf, resetting to 1",
                          ConvergenceWarning)

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__

    def summary(self):
        return "Global odds ratio: %.3f\n" % self.dep_params
class OrdinalIndependence(CategoricalCovStruct):
    """
    An independence covariance structure for ordinal models.

    The working covariance between indicators derived from different
    observations is zero.  The working covariance between indicators
    derived from a common observation is determined from their current
    mean values.

    There are no parameters to estimate in this covariance structure.
    """

    def covariance_matrix(self, expected_value, index):

        dim = len(expected_value)
        vcov = np.zeros((dim, dim))

        # Only blocks of indicators from the same observation get a
        # nonzero covariance; min(p_i, p_j) - p_i p_j for cumulative
        # indicators.
        for start, stop in self.ibd[index]:
            probs = expected_value[start:stop]
            vcov[start:stop, start:stop] = (np.minimum.outer(probs, probs) -
                                            np.outer(probs, probs))

        return vcov, False

    def update(self, params):
        # Nothing to update: the structure has no free parameters.
        pass
class NominalIndependence(CategoricalCovStruct):
    """
    An independence covariance structure for nominal models.

    The working covariance between indicators derived from different
    observations is zero.  The working covariance between indicators
    derived from a common observation is determined from their current
    mean values.

    There are no parameters to estimate in this covariance structure.
    """

    def covariance_matrix(self, expected_value, index):

        dim = len(expected_value)
        vcov = np.zeros((dim, dim))

        # Only blocks of indicators from the same observation get a
        # nonzero covariance; diag(p) - p p' for mutually exclusive
        # indicators.
        for start, stop in self.ibd[index]:
            probs = expected_value[start:stop]
            vcov[start:stop, start:stop] = (np.diag(probs) -
                                            np.outer(probs, probs))

        return vcov, False

    def update(self, params):
        # Nothing to update: the structure has no free parameters.
        pass
class Equivalence(CovStruct):
    """
    A covariance structure defined in terms of equivalence classes.

    An 'equivalence class' is a set of pairs of observations such that
    the covariance of every pair within the equivalence class has a
    common value.

    Parameters
    ----------
    pairs : dict-like
        A dictionary of dictionaries, where `pairs[group][label]`
        provides the indices of all pairs of observations in the group
        that have the same covariance value.  Specifically,
        `pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
        are integer arrays of the same length.  `j1[i], j2[i]` is one
        index pair that belongs to the `label` equivalence class.  Only
        one triangle of each covariance matrix should be included.
        Positions where j1 and j2 have the same value are variance
        parameters.
    labels : array-like
        An array of labels such that every distinct pair of labels
        defines an equivalence class.  Either `labels` or `pairs` must
        be provided.  When the two labels in a pair are equal two
        equivalence classes are defined: one for the diagonal elements
        (corresponding to variances) and one for the off-diagonal
        elements (corresponding to covariances).
    return_cov : boolean
        If True, `covariance_matrix` returns an estimate of the
        covariance matrix, otherwise returns an estimate of the
        correlation matrix.

    Notes
    -----
    Using `labels` to define the class is much easier than using
    `pairs`, but is less general.

    Any pair of values not contained in `pairs` will be assigned zero
    covariance.

    The index values in `pairs` are row indices into the `exog`
    matrix.  They are not updated if missing data are present.  When
    using this covariance structure, missing data should be removed
    before constructing the model.

    If using `labels`, after a model is defined using the covariance
    structure it is possible to remove a label pair from the second
    level of the `pairs` dictionary to force the corresponding
    covariance to be zero.

    Examples
    --------
    The following sets up the `pairs` dictionary for a model with two
    groups, equal variance for all observations, and constant
    covariance for all pairs of observations within each group.

    >> pairs = {0: {}, 1: {}}
    >> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
    >> pairs[0][1] = np.tril_indices(3, -1)
    >> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
    >> pairs[1][2] = 3 + np.tril_indices(3, -1)
    """

    def __init__(self, pairs=None, labels=None, return_cov=False):

        super(Equivalence, self).__init__()

        if (pairs is None) and (labels is None):
            raise ValueError("Equivalence cov_struct requires either `pairs` or `labels`")

        if (pairs is not None) and (labels is not None):
            raise ValueError("Equivalence cov_struct accepts only one of `pairs` and `labels`")

        if pairs is not None:
            # Deep copy so later in-place reindexing does not mutate the
            # caller's dictionary.
            import copy
            self.pairs = copy.deepcopy(pairs)

        if labels is not None:
            self.labels = np.asarray(labels)

        self.return_cov = return_cov

    def _make_pairs(self, i, j):
        """
        Create arrays containing all unique ordered pairs of elements
        in `i` and `j`.

        The arrays `i` and `j` must be one-dimensional containing
        non-negative integers.  Returns a tuple of two arrays holding
        the first and second elements of each pair.
        """

        mat = np.zeros((len(i)*len(j), 2), dtype=np.int32)

        # Create the pairs and order them
        f = np.ones(len(j))
        mat[:, 0] = np.kron(f, i).astype(np.int32)
        f = np.ones(len(i))
        mat[:, 1] = np.kron(j, f).astype(np.int32)
        mat.sort(1)

        # Remove repeated rows
        try:
            dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
            bmat = np.ascontiguousarray(mat).view(dtype)
            _, idx = np.unique(bmat, return_index=True)
        except TypeError:
            # workaround for old numpy that can't call unique with complex
            # dtypes
            np.random.seed(4234)
            bmat = np.dot(mat, np.random.uniform(size=mat.shape[1]))
            _, idx = np.unique(bmat, return_index=True)
        mat = mat[idx, :]

        return mat[:, 0], mat[:, 1]

    def _pairs_from_labels(self):
        """
        Construct the `pairs` dictionary from `self.labels`, creating
        one equivalence class per distinct label pair (plus separate
        classes for variance terms).
        """

        from collections import defaultdict
        pairs = defaultdict(lambda: defaultdict(lambda: None))

        model = self.model

        df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
        gb = df.groupby(["groups", "labels"])

        ulabels = np.unique(self.labels)

        for g_lb in model.group_labels:

            # Loop over label pairs
            for lx1 in range(len(ulabels)):
                for lx2 in range(lx1+1):

                    lb1 = ulabels[lx1]
                    lb2 = ulabels[lx2]

                    try:
                        i1 = gb.groups[(g_lb, lb1)]
                        i2 = gb.groups[(g_lb, lb2)]
                    except KeyError:
                        # One of the labels does not occur in this group.
                        continue

                    i1, i2 = self._make_pairs(i1, i2)

                    clabel = str(lb1) + "/" + str(lb2)

                    # Variance parameters belong in their own equiv class.
                    jj = np.flatnonzero(i1 == i2)
                    if len(jj) > 0:
                        clabelv = clabel + "/v"
                        pairs[g_lb][clabelv] = (i1[jj], i2[jj])

                    # Covariance parameters
                    jj = np.flatnonzero(i1 != i2)
                    if len(jj) > 0:
                        i1 = i1[jj]
                        i2 = i2[jj]
                        pairs[g_lb][clabel] = (i1, i2)

        self.pairs = pairs

    def initialize(self, model):

        super(Equivalence, self).initialize(model)

        if self.model.weights is not None:
            # Fixed typo in warning text: "equalence" -> "equivalence".
            warnings.warn("weights not implemented for equivalence cov_struct, using unweighted covariance estimate")

        if not hasattr(self, 'pairs'):
            self._pairs_from_labels()

        # Initialize so that any equivalence class containing a
        # variance parameter has value 1.
        self.dep_params = defaultdict(lambda: 0.)
        self._var_classes = set([])
        for gp in self.model.group_labels:
            for lb in self.pairs[gp]:
                j1, j2 = self.pairs[gp][lb]
                if np.any(j1 == j2):
                    if not np.all(j1 == j2):
                        warnings.warn("equivalence class contains both variance and covariance parameters")
                    self._var_classes.add(lb)
                    self.dep_params[lb] = 1

        # Need to start indexing at 0 within each group.
        # rx maps old indices to new indices
        rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
        for g_lb in self.model.group_labels:
            ii = self.model.group_indices[g_lb]
            rx[ii] = np.arange(len(ii), dtype=np.int32)

        # Reindex
        for gp in self.model.group_labels:
            for lb in self.pairs[gp].keys():
                a, b = self.pairs[gp][lb]
                self.pairs[gp][lb] = (rx[a], rx[b])

    def update(self, params):

        endog = self.model.endog_li
        varfunc = self.model.family.variance
        cached_means = self.model.cached_means

        # Accumulators per equivalence class: [cross product,
        # first-member sum of squares, second-member sum of squares].
        dep_params = defaultdict(lambda: [0., 0., 0.])
        n_pairs = defaultdict(lambda: 0)
        dim = len(params)

        for k, gp in enumerate(self.model.group_labels):
            expval, _ = cached_means[k]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[k] - expval) / stdev
            for lb in self.pairs[gp].keys():
                if (not self.return_cov) and lb in self._var_classes:
                    # Correlations of variance classes are fixed at 1.
                    continue
                jj = self.pairs[gp][lb]
                dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
                if not self.return_cov:
                    dep_params[lb][1] += np.sum(resid[jj[0]]**2)
                    dep_params[lb][2] += np.sum(resid[jj[1]]**2)
                n_pairs[lb] += len(jj[0])

        if self.return_cov:
            # Covariance estimate with degrees-of-freedom correction.
            for lb in dep_params.keys():
                dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
        else:
            # Correlation estimate: normalize the cross products.
            for lb in dep_params.keys():
                den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
                dep_params[lb] = dep_params[lb][0] / den
            for lb in self._var_classes:
                dep_params[lb] = 1.

        self.dep_params = dep_params
        self.n_pairs = n_pairs

    def covariance_matrix(self, expval, index):
        """
        Assemble the working covariance (or correlation) matrix for one
        group from the per-equivalence-class parameters.
        """
        dim = len(expval)
        cmat = np.zeros((dim, dim))
        g_lb = self.model.group_labels[index]

        for lb in self.pairs[g_lb].keys():
            j1, j2 = self.pairs[g_lb][lb]
            cmat[j1, j2] = self.dep_params[lb]

        # Symmetrize; the diagonal was counted twice by the fold.
        cmat = cmat + cmat.T
        np.fill_diagonal(cmat, cmat.diagonal() / 2)

        return cmat, not self.return_cov

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_gtk3.py | 8 | 39097 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
# Report the running GTK version as "major.minor.micro".  Fixed: the
# original interleaved the calls as (major, micro, minor), producing a
# mis-ordered version string.
backend_version = "%s.%s.%s" % (Gtk.get_major_version(),
                                Gtk.get_minor_version(),
                                Gtk.get_micro_version())

_debug = False
#_debug = True

# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96

# Map matplotlib cursor constants to native GDK cursors.
cursord = {
    cursors.MOVE          : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
    cursors.HAND          : Gdk.Cursor.new(Gdk.CursorType.HAND2),
    cursors.POINTER       : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
    cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
    }
def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.canvas.draw_idle()
class Show(ShowBase):
    def mainloop(self):
        # Enter the GTK main loop only if one is not already running
        # (main_level() > 0 indicates a loop is active).
        if Gtk.main_level() == 0:
            Gtk.main()

# Module-level singleton used as this backend's `show` entry point.
show = Show()
class TimerGTK3(TimerBase):
    '''
    Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.

    Attributes:
    * interval: The time between timer events in milliseconds. Default
      is 1000 ms.
    * single_shot: Boolean flag indicating whether this timer should
      operate as single shot (run once and then stop). Defaults to False.
    * callbacks: Stores list of (func, args) tuples that will be called
      upon timer events. This list can be manipulated directly, or the
      functions add_callback and remove_callback can be used.
    '''

    def _timer_start(self):
        # Cancel any pending source first so we never leak a GLib
        # timeout id that would fire forever.
        self._timer_stop()
        self._timer = GLib.timeout_add(self._interval, self._on_timer)

    def _timer_stop(self):
        if self._timer is None:
            return
        GLib.source_remove(self._timer)
        self._timer = None

    def _timer_set_interval(self):
        # Restart only when a timer is currently scheduled; an idle
        # timer picks up the new interval on its next start.
        if self._timer is not None:
            self._timer_stop()
            self._timer_start()

    def _on_timer(self):
        TimerBase._on_timer(self)

        # GLib keeps the source alive only while the callback returns
        # True; returning False (single-shot or no callbacks left)
        # removes it, so clear our handle as well.
        if self.callbacks and not self._single:
            return True
        self._timer = None
        return False
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
if _debug:
print("FigureCanvasGTK3.%s" % fn_name())
print("size_allocate (%d x %d)" % (allocation.width, allocation.height))
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
    def on_draw_event(self, widget, ctx):
        """Render the figure into the cairo context *ctx*.

        Stub here; concrete rendering is provided by subclasses.
        """
        # to be overwritten by GTK3Agg or GTK3Cairo
        pass
    def draw(self):
        """Request an immediate (synchronous) redraw of the canvas."""
        self._need_redraw = True
        # Only widgets that are visible and mapped can be drawn.
        if self.get_visible() and self.get_mapped():
            self.queue_draw()
            # do a synchronous draw (its less efficient than an async draw,
            # but is required if/when animation is used)
            self.get_property("window").process_updates (False)
    def draw_idle(self):
        """Schedule a redraw on the GLib idle loop, coalescing repeated
        requests: only one idle callback is pending at a time."""
        if self._idle_draw_id != 0:
            # A draw is already scheduled -- nothing to do.
            return
        def idle_draw(*args):
            try:
                self.draw()
            finally:
                # Clear the guard even if draw() raises, so future
                # draw_idle() calls can schedule again.
                self._idle_draw_id = 0
            return False  # one-shot idle callback
        self._idle_draw_id = GLib.idle_add(idle_draw)
    def new_timer(self, *args, **kwargs):
        """
        Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
        This is useful for getting periodic events through the backend's native
        event loop. Implemented only for backends with GUIs.

        optional arguments:

        *interval*
          Timer interval in milliseconds
        *callbacks*
          Sequence of (func, args, kwargs) where func(*args, **kwargs) will
          be executed by the timer every *interval*.
        """
        # All arguments are forwarded unchanged to the GTK3 timer.
        return TimerGTK3(*args, **kwargs)
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
    def start_event_loop(self,timeout):
        """Run the default (backend-agnostic) event loop; see the copied
        docstring below for details."""
        FigureCanvasBase.start_event_loop_default(self,timeout)
    # Reuse the base-class documentation for the delegated method.
    start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
    def stop_event_loop(self):
        """Stop the default (backend-agnostic) event loop; see the copied
        docstring below for details."""
        FigureCanvasBase.stop_event_loop_default(self)
    # Reuse the base-class documentation for the delegated method.
    stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK3(FigureManagerBase):
    """
    Top-level window manager for a GTK3 figure.

    Public attributes

    canvas : The FigureCanvas instance
    num : The Figure number
    toolbar : The Gtk.Toolbar  (gtk only)
    vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
    window : The Gtk.Window   (gtk only)
    """
    def __init__(self, canvas, num):
        # Builds the window: icon, vertical box with the canvas packed on
        # top, then toolbar/statusbar appended below, and finally sizes
        # the window to fit everything.
        if _debug: print('FigureManagerGTK3.%s' % fn_name())
        FigureManagerBase.__init__(self, canvas, num)

        self.window = Gtk.Window()
        self.set_window_title("Figure %d" % num)
        try:
            self.window.set_icon_from_file(window_icon)
        except (SystemExit, KeyboardInterrupt):
            # re-raise exit type Exceptions
            raise
        except:
            # some versions of gtk throw a glib.GError but not
            # all, so I am not sure how to catch it.  I am unhappy
            # doing a blanket catch here, but am not sure what a
            # better way is - JDH
            verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])

        self.vbox = Gtk.Box()
        self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
        self.window.add(self.vbox)
        self.vbox.show()

        self.canvas.show()

        self.vbox.pack_start(self.canvas, True, True, 0)
        # calculate size for window
        w = int (self.canvas.figure.bbox.width)
        h = int (self.canvas.figure.bbox.height)

        # Toolmanager must exist before the toolbar, which may wrap it.
        self.toolmanager = self._get_toolmanager()
        self.toolbar = self._get_toolbar()
        self.statusbar = None

        def add_widget(child, expand, fill, padding):
            # Pack *child* at the bottom and return its requested height
            # so the window default size can account for it.
            child.show()
            self.vbox.pack_end(child, False, False, 0)
            size_request = child.size_request()
            return size_request.height

        if self.toolmanager:
            backend_tools.add_tools_to_manager(self.toolmanager)
            if self.toolbar:
                backend_tools.add_tools_to_container(self.toolbar)
                self.statusbar = StatusbarGTK3(self.toolmanager)
                h += add_widget(self.statusbar, False, False, 0)
                h += add_widget(Gtk.HSeparator(), False, False, 0)

        if self.toolbar is not None:
            self.toolbar.show()
            h += add_widget(self.toolbar, False, False, 0)

        self.window.set_default_size (w, h)

        def destroy(*args):
            # Route window destruction through Gcf so bookkeeping stays
            # consistent across all open figures.
            Gcf.destroy(num)
        self.window.connect("destroy", destroy)
        self.window.connect("delete_event", destroy)
        if matplotlib.is_interactive():
            self.window.show()
            self.canvas.draw_idle()

        def notify_axes_change(fig):
            'this will be called whenever the current axes is changed'
            if self.toolmanager is not None:
                pass
            elif self.toolbar is not None:
                self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)

        self.canvas.grab_focus()

    def destroy(self, *args):
        """Destroy the window and all child widgets; quit the GTK main
        loop when this was the last figure in non-interactive mode."""
        if _debug: print('FigureManagerGTK3.%s' % fn_name())
        self.vbox.destroy()
        self.window.destroy()
        self.canvas.destroy()
        if self.toolbar:
            self.toolbar.destroy()

        if Gcf.get_num_fig_managers()==0 and \
               not matplotlib.is_interactive() and \
               Gtk.main_level() >= 1:
            Gtk.main_quit()

    def show(self):
        # show the figure window
        self.window.show()

    def full_screen_toggle (self):
        """Toggle between fullscreen and windowed mode."""
        self._full_screen_flag = not self._full_screen_flag
        if self._full_screen_flag:
            self.window.fullscreen()
        else:
            self.window.unfullscreen()
    # Class-level default: windows start non-fullscreen.
    _full_screen_flag = False

    def _get_toolbar(self):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        if rcParams['toolbar'] == 'toolbar2':
            toolbar = NavigationToolbar2GTK3 (self.canvas, self.window)
        elif rcParams['toolbar'] == 'toolmanager':
            toolbar = ToolbarGTK3(self.toolmanager)
        else:
            toolbar = None
        return toolbar

    def _get_toolmanager(self):
        # must be initialised after toolbar has been setted
        if rcParams['toolbar'] != 'toolbar2':
            toolmanager = ToolManager(self.canvas)
        else:
            toolmanager = None
        return toolmanager

    def get_window_title(self):
        """Return the current window title string."""
        return self.window.get_title()

    def set_window_title(self, title):
        """Set the window title string."""
        self.window.set_title(title)

    def resize(self, width, height):
        'set the canvas size in pixels'
        #_, _, cw, ch = self.canvas.allocation
        #_, _, ww, wh = self.window.allocation
        #self.window.resize (width-cw+ww, height-ch+wh)
        self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
    """GTK3 implementation of the classic ('toolbar2') navigation toolbar."""

    def __init__(self, canvas, window):
        # *window* is kept so dialogs (e.g. the file chooser) can be
        # parented to the figure window.
        self.win = window
        GObject.GObject.__init__(self)
        NavigationToolbar2.__init__(self, canvas)
        self.ctx = None

    def set_message(self, s):
        """Display *s* in the toolbar's message label."""
        self.message.set_label(s)

    def set_cursor(self, cursor):
        """Set the mouse cursor over the canvas from the cursord mapping."""
        self.canvas.get_property("window").set_cursor(cursord[cursor])
        #self.canvas.set_cursor(cursord[cursor])

    def release(self, event):
        # Drop the cached background pixmap, if any, after a mouse release.
        try: del self._pixmapBack
        except AttributeError: pass

    def dynamic_update(self):
        # legacy method; new method is canvas.draw_idle
        self.canvas.draw_idle()

    def draw_rubberband(self, event, x0, y0, x1, y1):
        'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
        self.ctx = self.canvas.get_property("window").cairo_create()

        # todo: instead of redrawing the entire figure, copy the part of
        # the figure that was covered by the previous rubberband rectangle
        self.canvas.draw()

        # Convert from matplotlib (origin bottom-left) to GTK (origin
        # top-left) y coordinates.
        height = self.canvas.figure.bbox.height
        y1 = height - y1
        y0 = height - y0
        w = abs(x1 - x0)
        h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]

        self.ctx.new_path()
        self.ctx.set_line_width(0.5)
        self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
        self.ctx.set_source_rgb(0, 0, 0)
        self.ctx.stroke()

    def _init_toolbar(self):
        # Build the tool buttons from self.toolitems plus a trailing
        # message label area.
        self.set_style(Gtk.ToolbarStyle.ICONS)
        basedir = os.path.join(rcParams['datapath'],'images')

        for text, tooltip_text, image_file, callback in self.toolitems:
            if text is None:
                # None entries are separators between button groups.
                self.insert( Gtk.SeparatorToolItem(), -1 )
                continue
            fname = os.path.join(basedir, image_file + '.png')
            image = Gtk.Image()
            image.set_from_file(fname)
            tbutton = Gtk.ToolButton()
            tbutton.set_label(text)
            tbutton.set_icon_widget(image)
            self.insert(tbutton, -1)
            tbutton.connect('clicked', getattr(self, callback))
            tbutton.set_tooltip_text(tooltip_text)

        # Invisible, expanding separator pushes the message label to the
        # right-hand edge of the toolbar.
        toolitem = Gtk.SeparatorToolItem()
        self.insert(toolitem, -1)
        toolitem.set_draw(False)
        toolitem.set_expand(True)

        toolitem = Gtk.ToolItem()
        self.insert(toolitem, -1)
        self.message = Gtk.Label()
        toolitem.add(self.message)

        self.show_all()

    def get_filechooser(self):
        """Build a save-figure file chooser preloaded with the supported
        filetypes and the canvas's default filename."""
        fc = FileChooserDialog(
            title='Save the figure',
            parent=self.win,
            path=os.path.expanduser(rcParams.get('savefig.directory', '')),
            filetypes=self.canvas.get_supported_filetypes(),
            default_filetype=self.canvas.get_default_filetype())
        fc.set_current_name(self.canvas.get_default_filename())
        return fc

    def save_figure(self, *args):
        """Prompt for a filename and save the figure, remembering the
        chosen directory for the next save."""
        chooser = self.get_filechooser()
        fname, format = chooser.get_filename_from_user()
        chooser.destroy()
        if fname:
            startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
            if startpath == '':
                # explicitly missing key or empty str signals to use cwd
                rcParams['savefig.directory'] = startpath
            else:
                # save dir for next time
                rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
            try:
                self.canvas.print_figure(fname, format=format)
            except Exception as e:
                error_msg_gtk(str(e), parent=self)

    def configure_subplots(self, button):
        """Open a separate window hosting a SubplotTool for this figure."""
        toolfig = Figure(figsize=(6,3))
        canvas = self._get_canvas(toolfig)
        toolfig.subplots_adjust(top=0.9)
        tool =  SubplotTool(self.canvas.figure, toolfig)

        w = int (toolfig.bbox.width)
        h = int (toolfig.bbox.height)

        window = Gtk.Window()
        try:
            window.set_icon_from_file(window_icon)
        except (SystemExit, KeyboardInterrupt):
            # re-raise exit type Exceptions
            raise
        except:
            # we presumably already logged a message on the
            # failure of the main plot, don't keep reporting
            pass
        window.set_title("Subplot Configuration Tool")
        window.set_default_size(w, h)
        vbox = Gtk.Box()
        vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
        window.add(vbox)
        vbox.show()

        canvas.show()
        vbox.pack_start(canvas, True, True, 0)
        window.show()

    def _get_canvas(self, fig):
        # Use the same canvas class as the main figure for the tool window.
        return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
    """GTK+ file selector which remembers the last file/directory
    selected and presents the user with a menu of supported image formats
    """
    def __init__ (self,
                  title   = 'Save file',
                  parent  = None,
                  action  = Gtk.FileChooserAction.SAVE,
                  buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                             Gtk.STOCK_SAVE,   Gtk.ResponseType.OK),
                  path    = None,
                  filetypes = [],
                  default_filetype = None
                  ):
        # NOTE(review): default_filetype is concatenated below, so callers
        # are expected to always pass a non-None extension string.
        super (FileChooserDialog, self).__init__ (title, parent, action,
                                                  buttons)
        self.set_default_response (Gtk.ResponseType.OK)

        if not path: path = os.getcwd() + os.sep

        # create an extra widget to list supported image formats
        self.set_current_folder (path)
        self.set_current_name ('image.' + default_filetype)

        hbox = Gtk.Box(spacing=10)
        hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)

        liststore = Gtk.ListStore(GObject.TYPE_STRING)
        cbox = Gtk.ComboBox() #liststore)
        cbox.set_model(liststore)
        cell = Gtk.CellRendererText()
        cbox.pack_start(cell, True)
        cbox.add_attribute(cell, 'text', 0)
        hbox.pack_start(cbox, False, False, 0)

        self.filetypes = filetypes
        # Sort extensions alphabetically for a stable combo-box order.
        self.sorted_filetypes = list(six.iteritems(filetypes))
        self.sorted_filetypes.sort()
        default = 0
        for i, (ext, name) in enumerate(self.sorted_filetypes):
            liststore.append(["%s (*.%s)" % (name, ext)])
            if ext == default_filetype:
                default = i
        cbox.set_active(default)
        self.ext = default_filetype

        def cb_cbox_changed (cbox, data=None):
            """File extension changed"""
            head, filename = os.path.split(self.get_filename())
            root, ext = os.path.splitext(filename)
            ext = ext[1:]
            new_ext = self.sorted_filetypes[cbox.get_active()][0]
            self.ext = new_ext

            # Only rewrite the visible filename when its current extension
            # is one we recognise (or missing entirely).
            if ext in self.filetypes:
                filename = root + '.' + new_ext
            elif ext == '':
                filename = filename.rstrip('.') + '.' + new_ext

            self.set_current_name (filename)
        cbox.connect ("changed", cb_cbox_changed)

        hbox.show_all()
        self.set_extra_widget(hbox)

    def get_filename_from_user (self):
        """Run the dialog; return (filename, extension), with filename
        None when the user cancels."""
        while True:
            filename = None
            if self.run() != int(Gtk.ResponseType.OK):
                break
            filename = self.get_filename()
            break

        return filename, self.ext
class RubberbandGTK3(backend_tools.RubberbandBase):
    """Toolmanager rubber-band tool: draws a zoom rectangle with cairo."""

    def __init__(self, *args, **kwargs):
        backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
        self.ctx = None

    def draw_rubberband(self, x0, y0, x1, y1):
        """Draw a 0.5pt black rectangle between the two corners, given in
        matplotlib (origin bottom-left) coordinates."""
        # 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
        # Recipe/189744'
        self.ctx = self.figure.canvas.get_property("window").cairo_create()

        # todo: instead of redrawing the entire figure, copy the part of
        # the figure that was covered by the previous rubberband rectangle
        self.figure.canvas.draw()

        # Flip y: GTK's origin is top-left, matplotlib's is bottom-left.
        height = self.figure.bbox.height
        y1 = height - y1
        y0 = height - y0
        w = abs(x1 - x0)
        h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]

        self.ctx.new_path()
        self.ctx.set_line_width(0.5)
        self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
        self.ctx.set_source_rgb(0, 0, 0)
        self.ctx.stroke()
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
    """Toolmanager-style toolbar: a horizontal strip of per-group
    Gtk.Toolbars separated by vertical separators."""

    def __init__(self, toolmanager):
        ToolContainerBase.__init__(self, toolmanager)
        Gtk.Box.__init__(self)
        self.set_property("orientation", Gtk.Orientation.VERTICAL)

        self._toolarea = Gtk.Box()
        self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
        self.pack_start(self._toolarea, False, False, 0)
        self._toolarea.show_all()
        # group name -> Gtk.Toolbar holding that group's buttons
        self._groups = {}
        # tool name -> list of (button, clicked-signal-handler-id)
        self._toolitems = {}

    def add_toolitem(self, name, group, position, image_file, description,
                     toggle):
        """Add one button for tool *name* to *group* at *position*."""
        if toggle:
            tbutton = Gtk.ToggleToolButton()
        else:
            tbutton = Gtk.ToolButton()
        tbutton.set_label(name)

        if image_file is not None:
            image = Gtk.Image()
            image.set_from_file(image_file)
            tbutton.set_icon_widget(image)

        if position is None:
            position = -1

        self._add_button(tbutton, group, position)
        signal = tbutton.connect('clicked', self._call_tool, name)
        tbutton.set_tooltip_text(description)
        tbutton.show_all()
        # The signal id is kept so toggle_toolitem can update the button
        # without re-triggering the tool.
        self._toolitems.setdefault(name, [])
        self._toolitems[name].append((tbutton, signal))

    def _add_button(self, button, group, position):
        # Lazily create the group's toolbar, separating it from the
        # previous group with a vertical separator.
        if group not in self._groups:
            if self._groups:
                self._add_separator()
            toolbar = Gtk.Toolbar()
            toolbar.set_style(Gtk.ToolbarStyle.ICONS)
            self._toolarea.pack_start(toolbar, False, False, 0)
            toolbar.show_all()
            self._groups[group] = toolbar
        self._groups[group].insert(button, position)

    def _call_tool(self, btn, name):
        # Button callback: dispatch to the toolmanager.
        self.trigger_tool(name)

    def toggle_toolitem(self, name, toggled):
        """Set the toggle state of every button for *name* without
        emitting 'clicked' (signals are blocked during the update)."""
        if name not in self._toolitems:
            return
        for toolitem, signal in self._toolitems[name]:
            toolitem.handler_block(signal)
            toolitem.set_active(toggled)
            toolitem.handler_unblock(signal)

    def remove_toolitem(self, name):
        """Remove every button for *name* from all groups; report via the
        toolmanager when the tool is unknown."""
        if name not in self._toolitems:
            self.toolmanager.message_event('%s Not in toolbar' % name, self)
            return

        for group in self._groups:
            for toolitem, _signal in self._toolitems[name]:
                if toolitem in self._groups[group]:
                    self._groups[group].remove(toolitem)
        del self._toolitems[name]

    def _add_separator(self):
        # Thin vertical rule between tool groups.
        sep = Gtk.Separator()
        sep.set_property("orientation", Gtk.Orientation.VERTICAL)
        self._toolarea.pack_start(sep, False, True, 0)
        sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
    """Statusbar that shows toolmanager messages in a Gtk.Statusbar."""

    def __init__(self, *args, **kwargs):
        StatusbarBase.__init__(self, *args, **kwargs)
        Gtk.Statusbar.__init__(self)
        # All messages share one context so set_message replaces rather
        # than stacks.
        self._context = self.get_context_id('message')

    def set_message(self, s):
        """Replace the currently shown message with *s*."""
        self.pop(self._context)
        self.push(self._context, s)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
    """Toolmanager 'save figure' tool (mirrors
    NavigationToolbar2GTK3.save_figure)."""

    def get_filechooser(self):
        """Build a save-figure file chooser preloaded with the supported
        filetypes and the canvas's default filename."""
        fc = FileChooserDialog(
            title='Save the figure',
            parent=self.figure.canvas.manager.window,
            path=os.path.expanduser(rcParams.get('savefig.directory', '')),
            filetypes=self.figure.canvas.get_supported_filetypes(),
            default_filetype=self.figure.canvas.get_default_filetype())
        fc.set_current_name(self.figure.canvas.get_default_filename())
        return fc

    def trigger(self, *args, **kwargs):
        """Prompt for a filename and save the figure, remembering the
        chosen directory for the next save."""
        chooser = self.get_filechooser()
        fname, format_ = chooser.get_filename_from_user()
        chooser.destroy()
        if fname:
            startpath = os.path.expanduser(
                rcParams.get('savefig.directory', ''))
            if startpath == '':
                # explicitly missing key or empty str signals to use cwd
                rcParams['savefig.directory'] = startpath
            else:
                # save dir for next time
                rcParams['savefig.directory'] = os.path.dirname(
                    six.text_type(fname))
            try:
                self.figure.canvas.print_figure(fname, format=format_)
            except Exception as e:
                error_msg_gtk(str(e), parent=self)
class SetCursorGTK3(backend_tools.SetCursorBase):
    """Toolmanager tool that changes the mouse cursor over the canvas."""

    def set_cursor(self, cursor):
        """Map the matplotlib cursor constant through cursord and apply it."""
        self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
    """Toolmanager tool opening a window with a SubplotTool for the figure.

    The window is built lazily on first trigger and torn down on destroy,
    so it is rebuilt fresh the next time the tool fires.
    """

    def __init__(self, *args, **kwargs):
        backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
        self.window = None

    def init_window(self):
        """Create the tool window on first use; no-op when it exists."""
        if self.window:
            return
        self.window = Gtk.Window(title="Subplot Configuration Tool")

        try:
            # BUGFIX: was ``self.window.window.set_icon_from_file`` --
            # a Gtk.Window has no ``window`` attribute here; call the
            # method on the window itself, as the other windows in this
            # module do.
            self.window.set_icon_from_file(window_icon)
        except (SystemExit, KeyboardInterrupt):
            # re-raise exit type Exceptions
            raise
        except:
            # we presumably already logged a message on the
            # failure of the main plot, don't keep reporting
            pass

        self.vbox = Gtk.Box()
        self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
        self.window.add(self.vbox)
        self.vbox.show()
        self.window.connect('destroy', self.destroy)

        toolfig = Figure(figsize=(6, 3))
        canvas = self._get_canvas(toolfig)
        toolfig.subplots_adjust(top=0.9)
        SubplotTool(self.figure, toolfig)

        w = int(toolfig.bbox.width)
        h = int(toolfig.bbox.height)

        self.window.set_default_size(w, h)

        canvas.show()
        self.vbox.pack_start(canvas, True, True, 0)
        self.window.show()

    def destroy(self, *args):
        """Destroy the tool window and reset state for a later rebuild."""
        self.window.destroy()
        self.window = None

    def _get_canvas(self, fig):
        # BUGFIX: tools carry ``self.figure``, not ``self.canvas``; the
        # original ``self.canvas.__class__`` raised AttributeError.
        return self.figure.canvas.__class__(fig)

    def trigger(self, sender, event, data=None):
        """Open (building if necessary) and raise the tool window."""
        self.init_window()
        self.window.present()
class DialogLineprops(object):
    """
    A GUI dialog for controlling lineprops
    """
    # Glade signal handler names autoconnected in __init__.
    signals = (
        'on_combobox_lineprops_changed',
        'on_combobox_linestyle_changed',
        'on_combobox_marker_changed',
        'on_colorbutton_linestyle_color_set',
        'on_colorbutton_markerface_color_set',
        'on_dialog_lineprops_okbutton_clicked',
        'on_dialog_lineprops_cancelbutton_clicked',
        )

    # Linestyle/marker choices and their reverse (value -> index) maps,
    # used to keep the combo boxes and Line2D objects in sync.
    linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
    linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])

    markers =  [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
    markerd = dict([(s,i) for i,s in enumerate(markers)])

    def __init__(self, lines):
        # NOTE(review): ``import Gtk.glade`` looks like unported legacy
        # PyGTK code -- under GObject Introspection there is no Gtk.glade
        # module, so this dialog likely fails on import; confirm before use.
        import Gtk.glade

        datadir = matplotlib.get_data_path()
        gladefile = os.path.join(datadir, 'lineprops.glade')
        if not os.path.exists(gladefile):
            raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)

        self._inited = False
        self._updateson = True # suppress updates when setting widgets manually
        self.wtree = Gtk.glade.XML(gladefile, 'dialog_lineprops')
        self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))

        self.dlg = self.wtree.get_widget('dialog_lineprops')

        self.lines = lines

        cbox = self.wtree.get_widget('combobox_lineprops')
        cbox.set_active(0)
        self.cbox_lineprops = cbox

        cbox = self.wtree.get_widget('combobox_linestyles')
        for ls in self.linestyles:
            cbox.append_text(ls)
        cbox.set_active(0)
        self.cbox_linestyles = cbox

        cbox = self.wtree.get_widget('combobox_markers')
        for m in self.markers:
            cbox.append_text(m)
        cbox.set_active(0)
        self.cbox_markers = cbox
        self._lastcnt = 0
        self._inited = True

    def show(self):
        'populate the combo box'
        self._updateson = False
        # flush the old
        cbox = self.cbox_lineprops
        # Remove entries back-to-front so indices stay valid.
        for i in range(self._lastcnt-1,-1,-1):
            cbox.remove_text(i)

        # add the new
        for line in self.lines:
            cbox.append_text(line.get_label())
        cbox.set_active(0)

        self._updateson = True
        self._lastcnt = len(self.lines)
        self.dlg.show()

    def get_active_line(self):
        'get the active line'
        ind = self.cbox_lineprops.get_active()
        line = self.lines[ind]
        return line

    def get_active_linestyle(self):
        'get the active lineinestyle'
        ind = self.cbox_linestyles.get_active()
        ls = self.linestyles[ind]
        return ls

    def get_active_marker(self):
        'get the active lineinestyle'
        ind = self.cbox_markers.get_active()
        m = self.markers[ind]
        return m

    def _update(self):
        'update the active line props from the widgets'
        if not self._inited or not self._updateson: return
        line = self.get_active_line()
        ls = self.get_active_linestyle()
        marker = self.get_active_marker()
        line.set_linestyle(ls)
        line.set_marker(marker)

        # Gdk colors are 16-bit per channel; normalize to [0, 1].
        button = self.wtree.get_widget('colorbutton_linestyle')
        color = button.get_color()
        r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
        line.set_color((r,g,b))

        button = self.wtree.get_widget('colorbutton_markerface')
        color = button.get_color()
        r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
        line.set_markerfacecolor((r,g,b))

        line.figure.canvas.draw()

    def on_combobox_lineprops_changed(self, item):
        'update the widgets from the active line'
        if not self._inited: return
        # Block _update() while we push line state into the widgets.
        self._updateson = False
        line = self.get_active_line()

        ls = line.get_linestyle()
        if ls is None: ls = 'None'
        self.cbox_linestyles.set_active(self.linestyled[ls])

        marker = line.get_marker()
        if marker is None: marker = 'None'
        self.cbox_markers.set_active(self.markerd[marker])

        # Scale [0, 1] rgb up to Gdk's 16-bit channels.
        r,g,b = colorConverter.to_rgb(line.get_color())
        color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
        button = self.wtree.get_widget('colorbutton_linestyle')
        button.set_color(color)

        r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
        color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
        button = self.wtree.get_widget('colorbutton_markerface')
        button.set_color(color)
        self._updateson = True

    def on_combobox_linestyle_changed(self, item):
        self._update()

    def on_combobox_marker_changed(self, item):
        self._update()

    def on_colorbutton_linestyle_color_set(self, button):
        self._update()

    def on_colorbutton_markerface_color_set(self, button):
        'called colorbutton marker clicked'
        self._update()

    def on_dialog_lineprops_okbutton_clicked(self, button):
        self._update()
        self.dlg.hide()

    def on_dialog_lineprops_cancelbutton_clicked(self, button):
        self.dlg.hide()
# Define the file to use as the GTk icon
# Windows cannot render the SVG icon, so fall back to the PNG there.
if sys.platform == 'win32':
    icon_filename = 'matplotlib.png'
else:
    icon_filename = 'matplotlib.svg'
window_icon = os.path.join(matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
    """Show *msg* in a modal GTK error dialog.

    *parent* may be any widget; it is resolved to its toplevel window, or
    dropped when the widget is not attached to one.
    """
    if parent is not None: # find the toplevel Gtk.Window
        parent = parent.get_toplevel()
        if not parent.is_toplevel():
            parent = None

    # Accept non-string payloads (e.g. an exception args tuple).
    if not is_string_like(msg):
        msg = ','.join(map(str,msg))

    dialog = Gtk.MessageDialog(
        parent         = parent,
        type           = Gtk.MessageType.ERROR,
        buttons        = Gtk.ButtonsType.OK,
        message_format = msg)
    dialog.run()
    dialog.destroy()
# Register the GTK3 tool implementations with the toolmanager framework
# and expose the canonical backend names expected by matplotlib.
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3

Toolbar = ToolbarGTK3
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
| apache-2.0 |
dssg/wikienergy | disaggregator/build/pandas/pandas/stats/tests/test_moments.py | 3 | 89255 | import nose
import sys
import functools
import warnings
from datetime import datetime
from numpy.random import randn
from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull, concat
from pandas.util.testing import (
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal
)
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3, StringIO
N, K = 100, 10
class Base(tm.TestCase):
    """Shared fixture builder for the moments test classes."""

    _multiprocess_can_split_ = True

    # Indices forced to NaN / inf in the 1-D fixture array.
    _nan_locs = np.arange(20, 40)
    _inf_locs = np.array([])

    def _create_data(self):
        """Build the random array/Series/DataFrame fixtures (length N,
        K columns) over a business-day index, with NaNs injected at
        _nan_locs."""
        arr = randn(N)
        arr[self._nan_locs] = np.NaN

        self.arr = arr
        self.rng = bdate_range(datetime(2009, 1, 1), periods=N)

        self.series = Series(arr.copy(), index=self.rng)

        self.frame = DataFrame(randn(N, K), index=self.rng,
                               columns=np.arange(K))
class TestMoments(Base):
    def setUp(self):
        """Build fixtures and silence FutureWarnings from the deprecated
        pandas.stats.moments API under test."""
        self._create_data()
        warnings.simplefilter("ignore", category=FutureWarning)
    def test_centered_axis_validation(self):
        """center=True is only valid along an axis the object has."""
        # ok
        mom.rolling_mean(Series(np.ones(10)),3,center=True ,axis=0)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,Series(np.ones(10)),3,center=True ,axis=1)

        # ok ok
        mom.rolling_mean(DataFrame(np.ones((10,10))),3,center=True ,axis=0)
        mom.rolling_mean(DataFrame(np.ones((10,10))),3,center=True ,axis=1)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,DataFrame(np.ones((10,10))),3,center=True ,axis=2)
    def test_rolling_sum(self):
        """rolling_sum matches np.sum over each window."""
        self._check_moment_func(mom.rolling_sum, np.sum)
    def test_rolling_count(self):
        """rolling_count matches counting finite values per window; NaNs
        are not preserved and missing windows fill with 0."""
        counter = lambda x: np.isfinite(x).astype(float).sum()
        self._check_moment_func(mom.rolling_count, counter,
                                has_min_periods=False,
                                preserve_nan=False,
                                fill_value=0)
    def test_rolling_mean(self):
        """rolling_mean matches np.mean over each window."""
        self._check_moment_func(mom.rolling_mean, np.mean)
    def test_cmov_mean(self):
        """Centered rolling mean against precomputed expected values for
        ndarray and Series input."""
        # GH 8238
        tm._skip_if_no_scipy()

        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
                         16.68, 9.48, 10.63, 14.48])
        xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
                       12.818, 12.952, np.nan, np.nan])

        rs = mom.rolling_mean(vals, 5, center=True)
        assert_almost_equal(xp, rs)

        xp = Series(rs)
        rs = mom.rolling_mean(Series(vals), 5, center=True)
        assert_series_equal(xp, rs)
    def test_cmov_window(self):
        """rolling_window with a boxcar window equals the centered mean."""
        # GH 8238
        tm._skip_if_no_scipy()

        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
                         13.49, 16.68, 9.48, 10.63, 14.48])
        xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
                       12.818, 12.952, np.nan, np.nan])

        rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
        assert_almost_equal(xp, rs)

        xp = Series(rs)
        rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
        assert_series_equal(xp, rs)
    def test_cmov_window_corner(self):
        """Edge cases for rolling_window: all-NaN input, empty input, and
        input shorter than the window."""
        # GH 8238
        tm._skip_if_no_scipy()

        # all nan
        vals = np.empty(10, dtype=float)
        vals.fill(np.nan)
        rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
        self.assertTrue(np.isnan(rs).all())

        # empty
        vals = np.array([])
        rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
        self.assertEqual(len(rs), 0)

        # shorter than window
        vals = np.random.randn(5)
        rs = mom.rolling_window(vals, 10, 'boxcar')
        self.assertTrue(np.isnan(rs).all())
        self.assertEqual(len(rs), 5)
    def test_cmov_window_frame(self):
        """rolling_window applies column-wise on a DataFrame."""
        # Gh 8238
        tm._skip_if_no_scipy()

        vals = np.array([[ 12.18,   3.64],
                         [ 10.18,   9.16],
                         [ 13.24,  14.61],
                         [  4.51,   8.11],
                         [  6.15,  11.44],
                         [  9.14,   6.21],
                         [ 11.31,  10.67],
                         [  2.94,   6.51],
                         [  9.42,   8.39],
                         [ 12.44,   7.34 ]])

        xp = np.array([[ np.nan,  np.nan],
                       [ np.nan,  np.nan],
                       [  9.252,   9.392],
                       [  8.644,   9.906],
                       [  8.87 ,  10.208],
                       [  6.81 ,   8.588],
                       [  7.792,   8.644],
                       [  9.05 ,   7.824],
                       [ np.nan,  np.nan],
                       [ np.nan,  np.nan]])

        # DataFrame
        rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
        assert_frame_equal(DataFrame(xp), rs)
    def test_cmov_window_na_min_periods(self):
        """With NaNs present, a boxcar rolling_window honouring
        min_periods matches rolling_mean with the same min_periods."""
        tm._skip_if_no_scipy()

        # min_periods
        vals = Series(np.random.randn(10))
        vals[4] = np.nan
        vals[8] = np.nan

        xp = mom.rolling_mean(vals, 5, min_periods=4, center=True)
        rs = mom.rolling_window(vals, 5, 'boxcar', min_periods=4, center=True)

        assert_series_equal(xp, rs)
    def test_cmov_window_regular(self):
        """Centered rolling_window with each standard scipy window type
        against precomputed expected values."""
        # GH 8238
        tm._skip_if_no_scipy()

        win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
                     'blackmanharris', 'nuttall', 'barthann']

        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
                         13.49, 16.68, 9.48, 10.63, 14.48])
        xps = {
            'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
                        14.03687, 13.8567, 11.81473, np.nan, np.nan],
            'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
                       13.33889, 13.38, 12.33667, np.nan, np.nan],
            'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
                         14.3675, 14.0825, 11.5675, np.nan, np.nan],
            'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
                       14.17267, 14.65923, 11.10401, np.nan, np.nan],
            'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
                               14.02156, 15.10512, 10.74574, np.nan, np.nan],
            'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
                        14.03559, 15.05657, 10.78514, np.nan, np.nan],
            'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
                         14.20036, 14.57726, 11.16988, np.nan, np.nan],
            'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
                         14.3675, 14.0825, 11.5675, np.nan, np.nan]}

        for wt in win_types:
            xp = Series(xps[wt])
            rs = mom.rolling_window(Series(vals), 5, wt, center=True)
            assert_series_equal(xp, rs)
    def test_cmov_window_regular_linear_range(self):
        """On a linear ramp every symmetric window reproduces the input
        (except the NaN border)."""
        # GH 8238
        tm._skip_if_no_scipy()

        win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
                     'blackmanharris', 'nuttall', 'barthann']

        vals = np.array(range(10), dtype=np.float)
        xp = vals.copy()
        xp[:2] = np.nan
        xp[-2:] = np.nan
        xp = Series(xp)

        for wt in win_types:
            rs = mom.rolling_window(Series(vals), 5, wt, center=True)
            assert_series_equal(xp, rs)
    def test_cmov_window_regular_missing_data(self):
        """Non-centered rolling_window with a NaN in the data and
        min_periods=3, against precomputed per-window-type values."""
        # GH 8238
        tm._skip_if_no_scipy()

        win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
                     'blackmanharris', 'nuttall', 'barthann']

        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
                         13.49, 16.68, np.nan, 10.63, 14.48])
        xps = {
            'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
                         9.1925, 12.5575, 14.3675, 15.61667, 13.655],
            'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
                         9.17869, 12.79607, 14.20036, 15.8706, 13.655],
            'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
                         9.1925, 12.5575, 14.3675, 15.61667, 13.655],
            'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
                       9.1764, 12.83559, 14.17267, 15.90976, 13.655],
            'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
                        9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
            'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
                        9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
            'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
                       10.34667, 12.00556, 13.82125, 14.49429, 13.765],
            'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
                               9.16438, 13.05052, 14.02175, 16.1098,
                               13.65509]
            }

        for wt in win_types:
            xp = Series(xps[wt])
            rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
            assert_series_equal(xp, rs)
    def test_cmov_window_special(self):
        """Window types that require extra keyword parameters (kaiser,
        gaussian, general_gaussian, slepian)."""
        # GH 8238
        tm._skip_if_no_scipy()

        win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
        kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
                {'width': 0.5}]

        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
                         13.49, 16.68, 9.48, 10.63, 14.48])

        xps = {
            'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
                         13.89053, 13.65671, 12.01002, np.nan, np.nan],
            'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
                                 11.73161, 13.08516, 12.95111, 12.74577,
                                 np.nan, np.nan],
            'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
                        12.88331, 12.96079, 12.77008, np.nan, np.nan],
            'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
                       12.75129, 12.90702, 12.83757, np.nan, np.nan]
        }

        for wt, k in zip(win_types, kwds):
            xp = Series(xps[wt])

            rs = mom.rolling_window(Series(vals), 5, wt, center=True,
                                    **k)
            assert_series_equal(xp, rs)
    def test_cmov_window_special_linear_range(self):
        """Parameterized window types also reproduce a linear ramp
        (except the NaN border)."""
        # GH 8238
        tm._skip_if_no_scipy()

        win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
        kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
                {'width': 0.5}]

        vals = np.array(range(10), dtype=np.float)
        xp = vals.copy()
        xp[:2] = np.nan
        xp[-2:] = np.nan
        xp = Series(xp)

        for wt, k in zip(win_types, kwds):
            rs = mom.rolling_window(Series(vals), 5, wt, center=True,
                                    **k)
            assert_series_equal(xp, rs)
    def test_rolling_median(self):
        """rolling_median matches np.median over each window."""
        self._check_moment_func(mom.rolling_median, np.median)
    def test_rolling_min(self):
        """rolling_min matches np.min; expanding min of an increasing
        array is its first element; min_periods > window raises."""
        self._check_moment_func(mom.rolling_min, np.min)

        a = np.array([1, 2, 3, 4, 5])
        b = mom.rolling_min(a, window=100, min_periods=1)
        assert_almost_equal(b, np.ones(len(a)))

        self.assertRaises(ValueError, mom.rolling_min, np.array([1,
                          2, 3]), window=3, min_periods=5)
    def test_rolling_max(self):
        """rolling_max matches np.max; expanding max of an increasing
        array is the array itself; min_periods > window raises."""
        self._check_moment_func(mom.rolling_max, np.max)

        a = np.array([1, 2, 3, 4, 5])
        b = mom.rolling_max(a, window=100, min_periods=1)
        assert_almost_equal(a, b)

        self.assertRaises(ValueError, mom.rolling_max, np.array([1,
                          2, 3]), window=3, min_periods=5)
    def test_rolling_quantile(self):
        """rolling_quantile matches a simple sort-and-index percentile
        implementation for q in {0.1, 0.5, 0.9}."""
        qs = [.1, .5, .9]

        def scoreatpercentile(a, per):
            # Reference implementation: index into the sorted values at
            # the (truncated) fractional position.
            values = np.sort(a, axis=0)

            idx = per / 1. * (values.shape[0] - 1)
            return values[int(idx)]

        for q in qs:
            def f(x, window, min_periods=None, freq=None, center=False):
                return mom.rolling_quantile(x, window, q,
                                            min_periods=min_periods,
                                            freq=freq,
                                            center=center)
            def alt(x):
                return scoreatpercentile(x, q)

            self._check_moment_func(f, alt)
    def test_rolling_apply(self):
        """rolling_apply: empty Series passthrough, mean-of-finite matches
        np.mean, and len() with min_periods=0 counts window sizes."""
        # suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)

            ser = Series([])
            assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))

            def roll_mean(x, window, min_periods=None, freq=None, center=False):
                return mom.rolling_apply(x, window,
                                         lambda x: x[np.isfinite(x)].mean(),
                                         min_periods=min_periods,
                                         freq=freq,
                                         center=center)
            self._check_moment_func(roll_mean, np.mean)

        # GH 8080
        s = Series([None, None, None])
        result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
        expected = Series([1., 2., 2.])
        assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
assert_almost_equal(result, result)
def test_rolling_std(self):
    """rolling_std matches numpy's std for both ddof=1 and ddof=0."""
    sample_std = lambda x: np.std(x, ddof=1)
    population_std = lambda x: np.std(x, ddof=0)

    self._check_moment_func(mom.rolling_std, sample_std)
    self._check_moment_func(functools.partial(mom.rolling_std, ddof=0),
                            population_std)
def test_rolling_std_1obs(self):
    """One observation: sample std is undefined (NaN), population std is 0."""
    vals = np.array([1., 2., 3., 4., 5.])

    # window=1 with ddof=1 -> NaN everywhere.
    result = mom.rolling_std(vals, 1, min_periods=1)
    assert_almost_equal(result, np.array([np.nan] * 5))

    # window=1 with ddof=0 -> exactly zero.
    result = mom.rolling_std(vals, 1, min_periods=1, ddof=0)
    assert_almost_equal(result, np.zeros(5))

    # Third window holds a single real value: below min_periods -> NaN.
    result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
                             3, min_periods=2)
    self.assertTrue(np.isnan(result[2]))
def test_rolling_std_neg_sqrt(self):
    """Float error must never yield a negative variance (sqrt -> NaN).

    Regression case borrowed from Bottleneck's move_nanstd tests.
    """
    nearly_constant = np.array([0.0011448196318903589,
                                0.00028718669878572767,
                                0.00028718669878572767,
                                0.00028718669878572767,
                                0.00028718669878572767])
    for computed in (mom.rolling_std(nearly_constant, window=3),
                     mom.ewmstd(nearly_constant, span=3)):
        self.assertTrue(np.isfinite(computed[2:]).all())
def test_rolling_var(self):
    """rolling_var matches numpy's var (ddof=1, stability-checked; and ddof=0)."""
    sample_var = lambda x: np.var(x, ddof=1)
    population_var = lambda x: np.var(x, ddof=0)

    self._check_moment_func(mom.rolling_var, sample_var,
                            test_stable=True)
    self._check_moment_func(functools.partial(mom.rolling_var, ddof=0),
                            population_var)
def test_rolling_skew(self):
    """rolling_skew matches scipy's bias-corrected skewness."""
    try:
        from scipy.stats import skew
    except ImportError:
        # scipy is an optional dependency for this test only.
        raise nose.SkipTest('no scipy')

    reference = lambda x: skew(x, bias=False)
    self._check_moment_func(mom.rolling_skew, reference)
def test_rolling_kurt(self):
    """rolling_kurt matches scipy's bias-corrected kurtosis."""
    try:
        from scipy.stats import kurtosis
    except ImportError:
        # scipy is an optional dependency for this test only.
        raise nose.SkipTest('no scipy')

    reference = lambda x: kurtosis(x, bias=False)
    self._check_moment_func(mom.rolling_kurt, reference)
def test_fperr_robustness(self):
    """Rolling sums/means/vars of non-negative data must stay non-negative
    despite floating-point rounding (GH 2114, GH 2527)."""
    # TODO: remove this once python 2.5 out of picture
    if PY3:
        raise nose.SkipTest("doesn't work on python 3")

    # #2114: raw little-endian doubles captured from the original bug report.
    data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
    arr = np.frombuffer(data, dtype='<f8')
    if sys.byteorder != "little":
        # The blob is little-endian; swap on big-endian machines.
        arr = arr.byteswap().newbyteorder()

    # All inputs are >= 0, so every rolling statistic must be too.
    result = mom.rolling_sum(arr, 2)
    self.assertTrue((result[1:] >= 0).all())

    result = mom.rolling_mean(arr, 2)
    self.assertTrue((result[1:] >= 0).all())

    result = mom.rolling_var(arr, 2)
    self.assertTrue((result[1:] >= 0).all())

    # #2527, ugh: tiny values where the running-sum trick loses precision.
    arr = np.array([0.00012456, 0.0003, 0])
    result = mom.rolling_mean(arr, 1)
    self.assertTrue(result[-1] >= 0)

    result = mom.rolling_mean(-arr, 1)
    self.assertTrue(result[-1] <= 0)
def _check_moment_func(self, func, static_comp, window=50,
                       has_min_periods=True,
                       has_center=True,
                       has_time_rule=True,
                       preserve_nan=True,
                       fill_value=None,
                       test_stable=False):
    """Run both the ndarray and the Series/DataFrame checks for one moment."""
    shared = dict(has_min_periods=has_min_periods,
                  has_center=has_center,
                  fill_value=fill_value)
    self._check_ndarray(func, static_comp, window=window,
                        preserve_nan=preserve_nan,
                        test_stable=test_stable, **shared)
    self._check_structures(func, static_comp,
                           has_time_rule=has_time_rule, **shared)
def _check_ndarray(self, func, static_comp, window=50,
                   has_min_periods=True,
                   preserve_nan=True,
                   has_center=True,
                   fill_value=None,
                   test_stable=False,
                   test_window=True):
    """Validate a rolling moment ``func`` against reference ``static_comp``
    on raw ndarrays.

    ``static_comp`` computes the statistic over a whole slice, so the last
    rolling value must equal it applied to the trailing ``window`` elements.
    Flags select which behaviors are exercised: NaN propagation,
    min_periods semantics, centering (GH 7925), numerical stability, and
    oversized windows (GH 7297).
    """
    result = func(self.arr, window)
    assert_almost_equal(result[-1],
                        static_comp(self.arr[-50:]))

    if preserve_nan:
        # NaNs in the input must surface at the same output positions.
        assert(np.isnan(result[self._nan_locs]).all())

    # excluding NaNs correctly
    arr = randn(50)
    arr[:10] = np.NaN
    arr[-10:] = np.NaN

    if has_min_periods:
        result = func(arr, 50, min_periods=30)
        assert_almost_equal(result[-1], static_comp(arr[10:-10]))

        # min_periods is working correctly: exactly at the 15-valid-obs
        # boundary the result flips between NaN and a value.
        result = func(arr, 20, min_periods=15)
        self.assertTrue(np.isnan(result[23]))
        self.assertFalse(np.isnan(result[24]))

        self.assertFalse(np.isnan(result[-6]))
        self.assertTrue(np.isnan(result[-5]))

        arr2 = randn(20)
        result = func(arr2, 10, min_periods=5)
        self.assertTrue(isnull(result[3]))
        self.assertTrue(notnull(result[4]))

        # min_periods=0 must behave exactly like min_periods=1.
        result0 = func(arr, 20, min_periods=0)
        result1 = func(arr, 20, min_periods=1)
        assert_almost_equal(result0, result1)
    else:
        result = func(arr, 50)
        assert_almost_equal(result[-1], static_comp(arr[10:-10]))

    # GH 7925: a centered result equals the trailing result of the input
    # padded with 9 NaNs, shifted back by 9.
    if has_center:
        if has_min_periods:
            result = func(arr, 20, min_periods=15, center=True)
            expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
        else:
            result = func(arr, 20, center=True)
            expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]

        self.assert_numpy_array_equivalent(result, expected)

    if test_stable:
        # A large additive offset must not destroy precision.
        result = func(self.arr + 1e9, window)
        assert_almost_equal(result[-1],
                            static_comp(self.arr[-50:] + 1e9))

    # Test window larger than array, #7297
    if test_window:
        if has_min_periods:
            for minp in (0, len(self.arr)-1, len(self.arr)):
                # window > len(arr) must behave exactly like window == len(arr).
                result = func(self.arr, len(self.arr)+1, min_periods=minp)
                expected = func(self.arr, len(self.arr), min_periods=minp)
                nan_mask = np.isnan(result)
                self.assertTrue(np.array_equal(nan_mask,
                                               np.isnan(expected)))
                nan_mask = ~nan_mask
                assert_almost_equal(result[nan_mask], expected[nan_mask])
        else:
            result = func(self.arr, len(self.arr)+1)
            expected = func(self.arr, len(self.arr))
            nan_mask = np.isnan(result)
            self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
            nan_mask = ~nan_mask
            assert_almost_equal(result[nan_mask], expected[nan_mask])
def _check_structures(self, func, static_comp,
                      has_min_periods=True, has_time_rule=True,
                      has_center=True,
                      fill_value=None):
    """Validate a rolling moment ``func`` on Series/DataFrame inputs.

    Covers container-type preservation, the ``freq`` resampling path, and
    centered results (GH 7925).
    """
    series_result = func(self.series, 50)
    tm.assert_isinstance(series_result, Series)

    frame_result = func(self.frame, 50)
    self.assertEqual(type(frame_result), DataFrame)

    # check time_rule works
    if has_time_rule:
        win = 25
        minp = 10

        if has_min_periods:
            series_result = func(self.series[::2], win, min_periods=minp,
                                 freq='B')
            frame_result = func(self.frame[::2], win, min_periods=minp,
                                freq='B')
        else:
            series_result = func(self.series[::2], win, freq='B')
            frame_result = func(self.frame[::2], win, freq='B')

        last_date = series_result.index[-1]
        # 25 business days back == the resampled window span.
        prev_date = last_date - 24 * datetools.bday

        trunc_series = self.series[::2].truncate(prev_date, last_date)
        trunc_frame = self.frame[::2].truncate(prev_date, last_date)

        assert_almost_equal(series_result[-1], static_comp(trunc_series))

        assert_almost_equal(frame_result.xs(last_date),
                            trunc_frame.apply(static_comp))

    # GH 7925: center=True equals padding the index with 12 dummy labels,
    # computing trailing, then shifting back by 12.
    if has_center:
        if has_min_periods:
            minp = 10
            series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.series.index)
            frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.frame.index)

            series_rs = func(self.series, 25, min_periods=minp,
                             center=True)
            frame_rs = func(self.frame, 25, min_periods=minp,
                            center=True)
        else:
            series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.series.index)
            frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.frame.index)

            series_rs = func(self.series, 25, center=True)
            frame_rs = func(self.frame, 25, center=True)

        if fill_value is not None:
            # Functions like rolling_count fill missing output with a value.
            series_xp = series_xp.fillna(fill_value)
            frame_xp = frame_xp.fillna(fill_value)
        assert_series_equal(series_xp, series_rs)
        assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
    """ewma basics: unit impulse response sums to ~1; with no NaNs the
    ignore_na flag is irrelevant for both adjust settings."""
    self._check_ew(mom.ewma)

    impulse = np.zeros(1000)
    impulse[5] = 1
    total = mom.ewma(impulse, span=100, adjust=False).sum()
    self.assertTrue(np.abs(total - 1) < 1e-2)

    s = Series([1.0, 2.0, 4.0, 8.0])

    # adjust=True weighting.
    expected = Series([1.0, 1.6, 2.736842, 4.923077])
    for compute in [lambda s: mom.ewma(s, com=2.0, adjust=True),
                    lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=False),
                    lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=True)]:
        assert_series_equal(compute(s), expected)

    # adjust=False recursive weighting.
    expected = Series([1.0, 1.333333, 2.222222, 4.148148])
    for compute in [lambda s: mom.ewma(s, com=2.0, adjust=False),
                    lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=False),
                    lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=True)]:
        assert_series_equal(compute(s), expected)
def test_ewma_nan_handling(self):
    """ewma must skip leading NaNs and weight interior NaNs according to
    the (adjust, ignore_na) combination (GH 7603)."""
    s = Series([1.] + [np.nan] * 5 + [1.])
    result = mom.ewma(s, com=5)
    assert_almost_equal(result, [1.] * len(s))

    s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
    result = mom.ewma(s, com=5)
    assert_almost_equal(result, [np.nan] * 2 + [1.] * 4)

    # GH 7603: verify the exact weights applied to the *first* observation
    # for every (series, adjust, ignore_na) combination.
    s0 = Series([np.nan, 1., 101.])
    s1 = Series([1., np.nan, 101.])
    s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
    s3 = Series([1., np.nan, 101., 50.])
    com = 2.
    alpha = 1. / (1. + com)

    def simple_wma(s, w):
        # Reference: explicit weighted moving average with weights w.
        return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')

    # Each tuple: (series, adjust, ignore_na, expected weight of the first
    # non-NaN value at each position).
    for (s, adjust, ignore_na, w) in [
        (s0, True, False, [np.nan, (1. - alpha), 1.]),
        (s0, True, True, [np.nan, (1. - alpha), 1.]),
        (s0, False, False, [np.nan, (1. - alpha), alpha]),
        (s0, False, True, [np.nan, (1. - alpha), alpha]),
        (s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
        (s1, True, True, [(1. - alpha), np.nan, 1.]),
        (s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
        (s1, False, True, [(1. - alpha), np.nan, alpha]),
        (s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
        (s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
        (s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
        (s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
        (s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
        (s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
        (s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
        (s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
    ]:
        expected = simple_wma(s, Series(w))
        result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
        assert_series_equal(result, expected)
        if ignore_na is False:
            # check that ignore_na defaults to False
            result = mom.ewma(s, com=com, adjust=adjust)
            assert_series_equal(result, expected)
def test_ewmvar(self):
    """Run the shared ewm checks for ewmvar."""
    self._check_ew(mom.ewmvar)
def test_ewmvol(self):
    """Run the shared ewm checks for ewmvol."""
    self._check_ew(mom.ewmvol)
def test_ewma_span_com_args(self):
    """com and span are equivalent, mutually exclusive parameterizations."""
    # span = 2*com + 1, so com=9.5 <=> span=20.
    via_com = mom.ewma(self.arr, com=9.5)
    via_span = mom.ewma(self.arr, span=20)
    assert_almost_equal(via_com, via_span)

    # Supplying both, or neither, is an error.
    self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20)
    self.assertRaises(Exception, mom.ewma, self.arr)
def test_ewma_halflife_arg(self):
    """halflife is a third, mutually exclusive decay parameterization."""
    # halflife=10 corresponds to this com value.
    via_com = mom.ewma(self.arr, com=13.932726172912965)
    via_halflife = mom.ewma(self.arr, halflife=10.0)
    assert_almost_equal(via_com, via_halflife)

    # Any combination of two or more decay args, or none at all, is an error.
    self.assertRaises(Exception, mom.ewma, self.arr, span=20, halflife=50)
    self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, halflife=50)
    self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
    self.assertRaises(Exception, mom.ewma, self.arr)
def test_ew_empty_arrays(self):
    """All ewm* functions pass empty input through unchanged."""
    empty = np.array([], dtype=np.float64)
    for ew_func in (mom.ewma, mom.ewmvol, mom.ewmvar):
        assert_almost_equal(ew_func(empty, 3), empty)
def _check_ew(self, func):
    """Run both the ndarray and the structure checks for one ewm function."""
    self._check_ew_ndarray(func)
    self._check_ew_structures(func)
def _check_ew_ndarray(self, func, preserve_nan=False):
    """Exercise an ewm function on ndarray/Series input: NaN exclusion,
    min_periods semantics (GH 7898), zero/one-length series, and int dtype
    promotion."""
    result = func(self.arr, com=10)
    if preserve_nan:
        assert(np.isnan(result[self._nan_locs]).all())

    # excluding NaNs correctly
    arr = randn(50)
    arr[:10] = np.NaN
    arr[-10:] = np.NaN
    s = Series(arr)

    # check min_periods
    # GH 7898
    result = func(s, 50, min_periods=2)
    self.assertTrue(np.isnan(result.values[:11]).all())
    self.assertFalse(np.isnan(result.values[11:]).any())

    for min_periods in (0, 1):
        result = func(s, 50, min_periods=min_periods)
        if func == mom.ewma:
            # ewma produces output from the first valid observation on.
            self.assertTrue(np.isnan(result.values[:10]).all())
            self.assertFalse(np.isnan(result.values[10:]).any())
        else:
            # ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
            self.assertTrue(np.isnan(result.values[:11]).all())
            self.assertFalse(np.isnan(result.values[11:]).any())

        # check series of length 0
        result = func(Series([]), 50, min_periods=min_periods)
        assert_series_equal(result, Series([]))

        # check series of length 1
        result = func(Series([1.]), 50, min_periods=min_periods)
        if func == mom.ewma:
            assert_series_equal(result, Series([1.]))
        else:
            # ewmstd, ewmvol, ewmvar with bias=False require at least two values
            assert_series_equal(result, Series([np.NaN]))

    # pass in ints: output must be promoted to float.
    result2 = func(np.arange(50), span=10)
    self.assertEqual(result2.dtype, np.float_)
def _check_ew_structures(self, func):
    """ewm functions preserve the container type of their input."""
    tm.assert_isinstance(func(self.series, com=10), Series)
    self.assertEqual(type(func(self.frame, com=10)), DataFrame)
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel()
return len(set(values[notnull(values)])) == 1
def no_nans(x):
return x.notnull().all().all()
# data is a tuple(object, is_contant, no_nans)
data = create_series() + create_dataframes()
return [ (x, is_constant(x), no_nans(x)) for x in data ]
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
def _create_data(self):
    """Extend the base fixtures with the module-level consistency dataset."""
    super(TestMomentsConsistency, self)._create_data()
    self.data = _consistency_data
def setUp(self):
    """Build fixtures and silence FutureWarnings from the deprecated API."""
    self._create_data()
    warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
                              min_periods,
                              count, mean, mock_mean, corr,
                              var_unbiased=None, std_unbiased=None, cov_unbiased=None,
                              var_biased=None, std_biased=None, cov_biased=None,
                              var_debiasing_factors=None):
    """Check algebraic identities between a family of moment functions.

    Each argument is a callable implementing one moment (count, mean, corr,
    biased/unbiased var/std/cov, debiasing factors) over the same window
    parameterization; the identities below must hold across the shared
    ``self.data`` cases.  NOTE(review): the (std, var, cov) loop assumes
    callers pass both the biased and unbiased triples together — confirm
    at call sites.
    """
    def _non_null_values(x):
        values = x.values.ravel()
        return set(values[notnull(values)].tolist())

    for (x, is_constant, no_nans) in self.data:
        assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
        count_x = count(x)
        mean_x = mean(x)

        if mock_mean:
            # check that mean equals mock_mean
            expected = mock_mean(x)
            assert_equal(mean_x, expected)

        # check that correlation of a series with itself is either 1 or NaN
        corr_x_x = corr(x, x)
        # self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.])))  # restore once rolling_cov(x, x) is identically equal to var(x)

        if is_constant:
            # check mean of constant series
            expected = x * np.nan
            expected[count_x >= max(min_periods, 1)] = x.max().max()
            assert_equal(mean_x, expected)

            # check correlation of constant series with itself is NaN
            expected[:] = np.nan
            assert_equal(corr_x_x, expected)

        if var_unbiased and var_biased and var_debiasing_factors:
            # check variance debiasing factors
            var_unbiased_x = var_unbiased(x)
            var_biased_x = var_biased(x)
            var_debiasing_factors_x = var_debiasing_factors(x)
            assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)

        for (std, var, cov) in [(std_biased, var_biased, cov_biased),
                                (std_unbiased, var_unbiased, cov_unbiased)]:

            # check that var(x), std(x), and cov(x) are all >= 0
            var_x = var(x)
            std_x = std(x)
            self.assertFalse((var_x < 0).any().any())
            self.assertFalse((std_x < 0).any().any())

            if cov:
                cov_x_x = cov(x, x)
                self.assertFalse((cov_x_x < 0).any().any())

                # check that var(x) == cov(x, x)
                assert_equal(var_x, cov_x_x)

            # check that var(x) == std(x)^2
            assert_equal(var_x, std_x * std_x)

            if var is var_biased:
                # check that biased var(x) == mean(x^2) - mean(x)^2
                mean_x2 = mean(x * x)
                assert_equal(var_x, mean_x2 - (mean_x * mean_x))

            if is_constant:
                # check that variance of constant series is identically 0
                self.assertFalse((var_x > 0).any().any())
                expected = x * np.nan
                expected[count_x >= max(min_periods, 1)] = 0.
                if var is var_unbiased:
                    # unbiased variance needs at least two observations
                    expected[count_x < 2] = np.nan
                assert_equal(var_x, expected)

            if isinstance(x, Series):
                for (y, is_constant, no_nans) in self.data:
                    if not x.isnull().equals(y.isnull()):
                        # can only easily test two Series with similar structure
                        continue

                    # check that cor(x, y) is symmetric
                    corr_x_y = corr(x, y)
                    corr_y_x = corr(y, x)
                    assert_equal(corr_x_y, corr_y_x)

                    if cov:
                        # check that cov(x, y) is symmetric
                        cov_x_y = cov(x, y)
                        cov_y_x = cov(y, x)
                        assert_equal(cov_x_y, cov_y_x)

                        # check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
                        var_x_plus_y = var(x + y)
                        var_y = var(y)
                        assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))

                        # check that corr(x, y) == cov(x, y) / (std(x) * std(y))
                        std_y = std(y)
                        assert_equal(corr_x_y, cov_x_y / (std_x * std_y))

                        if cov is cov_biased:
                            # check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
                            mean_y = mean(y)
                            mean_x_times_y = mean(x * y)
                            assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@slow
def test_ewm_consistency(self):
    """Cross-check all ewm* moments against explicit weight computations
    for every (min_periods, adjust, ignore_na) combination."""
    def _weights(s, com, adjust, ignore_na):
        # Effective weight that each observation carries in the ewm sum.
        if isinstance(s, DataFrame):
            if not len(s.columns):
                return DataFrame(index=s.index, columns=s.columns)
            # Compute column-by-column, then reassemble.
            w = concat([ _weights(s.iloc[:, i],
                                  com=com,
                                  adjust=adjust,
                                  ignore_na=ignore_na) for i, _ in enumerate(s.columns) ],
                       axis=1)
            w.index = s.index
            w.columns = s.columns
            return w

        w = Series(np.nan, index=s.index)
        alpha = 1. / (1. + com)
        if ignore_na:
            # Positions of NaNs don't affect relative weights.
            w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
        elif adjust:
            for i in range(len(s)):
                if s.iat[i] == s.iat[i]:  # NaN check without isnan
                    w.iat[i] = pow(1. / (1. - alpha), i)
        else:
            sum_wts = 0.
            prev_i = -1
            for i in range(len(s)):
                if s.iat[i] == s.iat[i]:  # NaN check without isnan
                    if prev_i == -1:
                        w.iat[i] = 1.
                    else:
                        # Gaps of NaNs still decay the older weights.
                        w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
                    sum_wts += w.iat[i]
                    prev_i = i
        return w

    def _variance_debiasing_factors(s, com, adjust, ignore_na):
        # sum(w)^2 / (sum(w)^2 - sum(w^2)); non-positive denominators -> NaN.
        weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
        cum_sum = weights.cumsum().fillna(method='ffill')
        cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
        numerator = cum_sum * cum_sum
        denominator = numerator - cum_sum_sq
        denominator[denominator <= 0.] = np.nan
        return numerator / denominator

    def _ewma(s, com, min_periods, adjust, ignore_na):
        # Reference ewma: weighted cumulative sum over cumulative weights.
        weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
        result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
        result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
        return result

    com = 3.
    for min_periods in [0, 1, 2, 3, 4]:
        for adjust in [True, False]:
            for ignore_na in [False, True]:
                # test consistency between different ewm* moments
                # (lambdas close over the loop vars but are invoked within
                # this iteration, so late binding is not an issue)
                self._test_moments_consistency(
                    min_periods=min_periods,
                    count=mom.expanding_count,
                    mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
                    mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
                    corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
                    var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
                    std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
                    cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
                    var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
                    std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
                    cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
                    var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
@slow
def test_expanding_consistency(self):
    """Cross-check expanding_* moments against each other and against
    expanding_apply of the equivalent Series/numpy reduction."""
    # (expanding function, reference reduction, minimum min_periods that
    # makes the comparison valid — None means no restriction)
    base_functions = [
        (mom.expanding_count, lambda v: Series(v).count(), None),
        (mom.expanding_max, lambda v: Series(v).max(), None),
        (mom.expanding_min, lambda v: Series(v).min(), None),
        (mom.expanding_sum, lambda v: Series(v).sum(), None),
        (mom.expanding_mean, lambda v: Series(v).mean(), None),
        (mom.expanding_std, lambda v: Series(v).std(), 1),
        (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
        (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
        (mom.expanding_var, lambda v: Series(v).var(), 1),
        #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
        #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
        #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
        # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
        (mom.expanding_median, lambda v: Series(v).median(), None),
        (mom.expanding_max, np.nanmax, 1),
        (mom.expanding_min, np.nanmin, 1),
        (mom.expanding_sum, np.nansum, 1),
    ]
    if np.__version__ >= LooseVersion('1.8.0'):
        # nanmean/nanstd/nanvar only exist from numpy 1.8 on
        base_functions += [
            (mom.expanding_mean, np.nanmean, 1),
            (mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
            (mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
        ]
    if np.__version__ >= LooseVersion('1.9.0'):
        base_functions += [
            (mom.expanding_median, np.nanmedian, 1),
        ]
    # references valid only for NaN-free inputs
    no_nan_functions = [
        (mom.expanding_max, np.max, None),
        (mom.expanding_min, np.min, None),
        (mom.expanding_sum, np.sum, None),
        (mom.expanding_mean, np.mean, None),
        (mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
        (mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
        (mom.expanding_median, np.median, None),
    ]

    # suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)

        for min_periods in [0, 1, 2, 3, 4]:

            # test consistency between different expanding_* moments
            self._test_moments_consistency(
                min_periods=min_periods,
                count=mom.expanding_count,
                mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
                mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
                corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
                var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
                std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
                cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
                var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
                std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
                cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
                var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
            )

            # test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
            # or (b) expanding_apply of np.nanxyz()
            for (x, is_constant, no_nans) in self.data:
                assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
                functions = base_functions

                # GH 8269: np.xyz references are only valid without NaNs
                if no_nans:
                    functions = base_functions + no_nan_functions
                for (expanding_f, f, require_min_periods) in functions:
                    if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
                        continue

                    if expanding_f is mom.expanding_count:
                        expanding_f_result = expanding_f(x)
                        expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
                    else:
                        if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
                            expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
                        else:
                            expanding_f_result = expanding_f(x, min_periods=min_periods)
                        expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
                    assert_equal(expanding_f_result, expanding_apply_f_result)

                    if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
                        # test pairwise=True: panel entry (i, j) must equal
                        # the directly-computed column pair
                        expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
                        expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
                        for i, _ in enumerate(x.columns):
                            for j, _ in enumerate(x.columns):
                                expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
                        assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
    """Cross-check rolling_* moments against each other and against
    rolling_apply of the equivalent Series/numpy reduction."""
    # (rolling function, reference reduction, minimum min_periods that
    # makes the comparison valid — None means no restriction)
    base_functions = [
        (mom.rolling_count, lambda v: Series(v).count(), None),
        (mom.rolling_max, lambda v: Series(v).max(), None),
        (mom.rolling_min, lambda v: Series(v).min(), None),
        (mom.rolling_sum, lambda v: Series(v).sum(), None),
        (mom.rolling_mean, lambda v: Series(v).mean(), None),
        (mom.rolling_std, lambda v: Series(v).std(), 1),
        (mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
        (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
        (mom.rolling_var, lambda v: Series(v).var(), 1),
        #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
        #(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
        #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
        # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
        (mom.rolling_median, lambda v: Series(v).median(), None),
        (mom.rolling_max, np.nanmax, 1),
        (mom.rolling_min, np.nanmin, 1),
        (mom.rolling_sum, np.nansum, 1),
    ]
    if np.__version__ >= LooseVersion('1.8.0'):
        # nanmean/nanstd/nanvar only exist from numpy 1.8 on
        base_functions += [
            (mom.rolling_mean, np.nanmean, 1),
            (mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
            (mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
        ]
    if np.__version__ >= LooseVersion('1.9.0'):
        base_functions += [
            (mom.rolling_median, np.nanmedian, 1),
        ]
    # references valid only for NaN-free inputs
    no_nan_functions = [
        (mom.rolling_max, np.max, None),
        (mom.rolling_min, np.min, None),
        (mom.rolling_sum, np.sum, None),
        (mom.rolling_mean, np.mean, None),
        (mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
        (mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
        (mom.rolling_median, np.median, None),
    ]

    for window in [1, 2, 3, 10, 20]:
        for min_periods in set([0, 1, 2, 3, 4, window]):
            if min_periods and (min_periods > window):
                continue
            for center in [False, True]:

                # test consistency between different rolling_* moments
                self._test_moments_consistency(
                    min_periods=min_periods,
                    count=lambda x: mom.rolling_count(x, window=window, center=center),
                    mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
                    mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
                        mom.rolling_count(x, window=window, center=center)),
                    corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
                    var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
                    std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
                    cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
                    var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
                    std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
                    cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
                    var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
                        (mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
                )

                # test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
                # or (b) rolling_apply of np.nanxyz()
                for (x, is_constant, no_nans) in self.data:
                    assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
                    functions = base_functions

                    # GH 8269: np.xyz references are only valid without NaNs
                    if no_nans:
                        functions = base_functions + no_nan_functions
                    for (rolling_f, f, require_min_periods) in functions:
                        if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
                            continue

                        if rolling_f is mom.rolling_count:
                            rolling_f_result = rolling_f(x, window=window, center=center)
                            rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
                                                                       min_periods=0, center=center)
                        else:
                            if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
                                rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
                            else:
                                rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
                            rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
                                                                       min_periods=min_periods, center=center)
                        assert_equal(rolling_f_result, rolling_apply_f_result)

                        if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
                            # test pairwise=True: panel entry (i, j) must
                            # equal the directly-computed column pair
                            rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
                                                         center=center, pairwise=True)
                            expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
                            for i, _ in enumerate(x.columns):
                                for j, _ in enumerate(x.columns):
                                    expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
                                                                       window=window, min_periods=min_periods, center=center)
                            assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
    """rolling_cov over a full window equals np.cov of the tail slice."""
    left = self.series
    right = left + randn(len(left))
    rolled = mom.rolling_cov(left, right, 50, min_periods=25)
    reference = np.cov(left[-50:], right[-50:])[0, 1]
    assert_almost_equal(rolled[-1], reference)
def test_rolling_cov_pairwise(self):
    """Pairwise rolling_cov agrees with per-pair computation."""
    self._check_pairwise_moment(mom.rolling_cov, 10, min_periods=5)
def test_rolling_corr(self):
    """rolling_corr matches np.corrcoef and handles misaligned NaN heads."""
    left = self.series
    right = left + randn(len(left))
    rolled = mom.rolling_corr(left, right, 50, min_periods=25)
    reference = np.corrcoef(left[-50:], right[-50:])[0, 1]
    assert_almost_equal(rolled[-1], reference)

    # test for correct bias correction
    a = tm.makeTimeSeries()
    b = tm.makeTimeSeries()
    # different amounts of leading NaNs in each input
    a[:5] = np.nan
    b[:10] = np.nan

    rolled = mom.rolling_corr(a, b, len(a), min_periods=1)
    assert_almost_equal(rolled[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
    """Pairwise rolling_corr agrees with per-pair computation."""
    self._check_pairwise_moment(mom.rolling_corr, 10, min_periods=5)
def _check_pairwise_moment(self, func, *args, **kwargs):
    """A pairwise result sliced at (1, 5) must equal the directly-computed
    pair of those two columns."""
    pairwise = func(self.frame, *args, **kwargs)
    from_panel = pairwise.ix[:, 1, 5]
    direct = func(self.frame[1], self.frame[5], *args, **kwargs)
    tm.assert_series_equal(from_panel, direct)
def test_flex_binary_moment(self):
    # GH3155
    # don't blow the stack: non-pandas arguments must raise TypeError
    # immediately instead of recursing.
    self.assertRaises(TypeError, mom._flex_binary_moment,5,6,None)
def test_corr_sanity(self):
    # GH 3155: rolling correlation values must stay within [-1, 1].
    df = DataFrame(
        np.array(
            [[ 0.87024726, 0.18505595],
             [ 0.64355431, 0.3091617 ],
             [ 0.92372966, 0.50552513],
             [ 0.00203756, 0.04520709],
             [ 0.84780328, 0.33394331],
             [ 0.78369152, 0.63919667]])
    )
    res = mom.rolling_corr(df[0], df[1], 5, center=True)
    self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
    # and some fuzzing
    for i in range(10):
        df = DataFrame(np.random.rand(30, 2))
        res = mom.rolling_corr(df[0], df[1], 5, center=True)
        try:
            self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
        except AssertionError:
            # BUG FIX: the original bare ``except: print(res)`` swallowed the
            # assertion failure, so the fuzz loop could never fail. Print the
            # offending series for debugging, then re-raise.
            print(res)
            raise
def test_flex_binary_frame(self):
    """Binary moments broadcast a Series across a frame in either argument
    order, and compute frame-vs-frame column by column."""
    def _check(method):
        series = self.frame[1]
        res = method(series, self.frame, 10)
        res2 = method(self.frame, series, 10)
        # Either argument order must equal the column-wise application.
        exp = self.frame.apply(lambda col: method(series, col, 10))
        tm.assert_frame_equal(res, exp)
        tm.assert_frame_equal(res2, exp)
        # Frame vs frame: each column is paired with its namesake.
        frame2 = self.frame.copy()
        frame2.values[:] = np.random.randn(*frame2.shape)
        res3 = method(self.frame, frame2, 10)
        exp = DataFrame(dict((k, method(self.frame[k], frame2[k], 10))
                             for k in self.frame))
        tm.assert_frame_equal(res3, exp)

    for meth in (mom.rolling_corr, mom.rolling_cov):
        _check(meth)
def test_ewmcov(self):
    """Run the shared binary-EW checks for ewmcov."""
    self._check_binary_ew(mom.ewmcov)
def test_ewmcov_pairwise(self):
    """Pairwise ewmcov on a frame matches the Series-vs-Series result."""
    self._check_pairwise_moment(mom.ewmcov, span=10, min_periods=5)
def test_ewmcorr(self):
    """Run the shared binary-EW checks for ewmcorr."""
    self._check_binary_ew(mom.ewmcorr)
def test_ewmcorr_pairwise(self):
    """Pairwise ewmcorr on a frame matches the Series-vs-Series result."""
    self._check_pairwise_moment(mom.ewmcorr, span=10, min_periods=5)
def _check_binary_ew(self, func):
    """Shared checks for binary EW moments (ewmcov / ewmcorr)."""
    A = Series(randn(50), index=np.arange(50))
    B = A[2:] + randn(48)
    A[:10] = np.NaN
    B[-10:] = np.NaN
    result = func(A, B, 20, min_periods=5)
    # First valid aligned pair is at index 10; with min_periods=5 the first
    # non-NaN output appears at index 14.
    self.assertTrue(np.isnan(result.values[:14]).all())
    self.assertFalse(np.isnan(result.values[14:]).any())
    # GH 7898
    for min_periods in (0, 1, 2):
        result = func(A, B, 20, min_periods=min_periods)
        # binary functions (ewmcov, ewmcorr) with bias=False require at least two values
        self.assertTrue(np.isnan(result.values[:11]).all())
        self.assertFalse(np.isnan(result.values[11:]).any())
        # check series of length 0
        result = func(Series([]), Series([]), 50, min_periods=min_periods)
        assert_series_equal(result, Series([]))
        # check series of length 1: a single pair cannot satisfy the
        # two-value requirement, so the result is NaN.
        result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
        assert_series_equal(result, Series([np.NaN]))
    # Mixing a Series with a raw ndarray of mismatched alignment must raise.
    self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply(self):
    """expanding_apply handles empty input, matches np.mean, and evaluates
    every prefix when min_periods=0 (GH 8080)."""
    empty = Series([])
    assert_series_equal(empty, mom.expanding_apply(empty, lambda x: x.mean()))

    def expanding_mean(x, min_periods=1, freq=None):
        # Adapter so _check_expanding can drive expanding_apply.
        return mom.expanding_apply(x,
                                   lambda x: x.mean(),
                                   min_periods=min_periods,
                                   freq=freq)

    self._check_expanding(expanding_mean, np.mean)
    # GH 8080: with min_periods=0 the function sees every prefix,
    # even when all values are missing.
    s = Series([None, None, None])
    result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
    assert_series_equal(result, Series([1., 2., 3.]))
def test_expanding_apply_args_kwargs(self):
    """Extra positional and keyword arguments are forwarded to the function."""
    def mean_w_arg(x, const):
        return np.mean(x) + const

    df = DataFrame(np.random.rand(20, 3))
    expected = mom.expanding_apply(df, np.mean) + 20.
    # Forwarded positionally ...
    via_args = mom.expanding_apply(df, mean_w_arg, args=(20,))
    assert_frame_equal(via_args, expected)
    # ... and by keyword.
    via_kwargs = mom.expanding_apply(df, mean_w_arg, kwargs={'const': 20})
    assert_frame_equal(via_kwargs, expected)
def test_expanding_corr(self):
    """Expanding corr equals rolling corr with a full-length window."""
    base = self.series.dropna()
    noisy = (base + randn(len(base)))[:-5]
    expanding = mom.expanding_corr(base, noisy)
    # A window spanning the whole series with min_periods=1 is an
    # expanding window.
    full_window = mom.rolling_corr(base, noisy, len(base), min_periods=1)
    assert_almost_equal(full_window, expanding)
def test_expanding_count(self):
    """Expanding count equals rolling count with a full-length window."""
    expanding = mom.expanding_count(self.series)
    rolling = mom.rolling_count(self.series, len(self.series))
    assert_almost_equal(expanding, rolling)
def test_expanding_quantile(self):
    """Expanding median-quantile equals full-window rolling quantile."""
    expanding = mom.expanding_quantile(self.series, 0.5)
    rolling = mom.rolling_quantile(self.series, len(self.series),
                                   0.5, min_periods=1)
    assert_almost_equal(expanding, rolling)
def test_expanding_cov(self):
    """Expanding covariance equals full-window rolling covariance."""
    base = self.series
    other = (base + randn(len(base)))[:-5]
    expanding = mom.expanding_cov(base, other)
    full_window = mom.rolling_cov(base, other, len(base), min_periods=1)
    assert_almost_equal(full_window, expanding)
def test_expanding_max(self):
    # preserve_nan=False: expanding max skips NaNs once a valid value
    # has been seen, so input NaN positions need not surface in the output.
    self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
    """Pairwise expanding cov equals full-window pairwise rolling cov."""
    expanding = mom.expanding_cov(self.frame)
    rolling = mom.rolling_cov(self.frame, len(self.frame),
                              min_periods=1)
    # Compare the resulting Panels item by item.
    for item in expanding.items:
        assert_almost_equal(expanding[item], rolling[item])
def test_expanding_corr_pairwise(self):
    """Pairwise expanding corr equals full-window pairwise rolling corr."""
    expanding = mom.expanding_corr(self.frame)
    rolling = mom.rolling_corr(self.frame, len(self.frame),
                               min_periods=1)
    # Compare the resulting Panels item by item.
    for item in expanding.items:
        assert_almost_equal(expanding[item], rolling[item])
def test_expanding_cov_diff_index(self):
    """expanding_cov aligns inputs with different indexes (GH 7512)."""
    left = Series([1, 2, 3], index=[0, 1, 2])
    shorter = Series([1, 3], index=[0, 2])
    expected = Series([None, None, 2.0])
    assert_series_equal(mom.expanding_cov(left, shorter), expected)
    # An explicit NaN at the missing label behaves the same as absence.
    padded = Series([1, None, 3], index=[0, 1, 2])
    assert_series_equal(mom.expanding_cov(left, padded), expected)
    # Partially overlapping indexes: only common labels contribute.
    left = Series([7, 8, 10], index=[0, 1, 3])
    right = Series([7, 9, 10], index=[0, 2, 3])
    expected = Series([None, None, None, 4.5])
    assert_series_equal(mom.expanding_cov(left, right), expected)
def test_expanding_corr_diff_index(self):
    """expanding_corr aligns inputs with different indexes (GH 7512)."""
    left = Series([1, 2, 3], index=[0, 1, 2])
    shorter = Series([1, 3], index=[0, 2])
    expected = Series([None, None, 1.0])
    assert_series_equal(mom.expanding_corr(left, shorter), expected)
    # An explicit NaN at the missing label behaves the same as absence.
    padded = Series([1, None, 3], index=[0, 1, 2])
    assert_series_equal(mom.expanding_corr(left, padded), expected)
    # Partially overlapping indexes: only common labels contribute.
    left = Series([7, 8, 10], index=[0, 1, 3])
    right = Series([7, 9, 10], index=[0, 2, 3])
    expected = Series([None, None, None, 1.])
    assert_series_equal(mom.expanding_corr(left, right), expected)
def test_rolling_cov_diff_length(self):
    """rolling_cov aligns inputs of unequal length (GH 7512)."""
    s1 = Series([1, 2, 3], index=[0, 1, 2])
    expected = Series([None, None, 2.0])
    # A shorter series and a NaN-padded series must give the same answer.
    for other in (Series([1, 3], index=[0, 2]),
                  Series([1, None, 3], index=[0, 1, 2])):
        result = mom.rolling_cov(s1, other, window=3, min_periods=2)
        assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
    """rolling_corr aligns inputs of unequal length (GH 7512)."""
    s1 = Series([1, 2, 3], index=[0, 1, 2])
    expected = Series([None, None, 1.0])
    # A shorter series and a NaN-padded series must give the same answer.
    for other in (Series([1, 3], index=[0, 2]),
                  Series([1, None, 3], index=[0, 1, 2])):
        result = mom.rolling_corr(s1, other, window=3, min_periods=2)
        assert_series_equal(result, expected)
def test_rolling_functions_window_non_shrinkage(self):
    # GH 7764: when the window/min_periods cannot be satisfied, the result
    # keeps the input's shape (filled with NaN) rather than shrinking.
    s = Series(range(4))
    s_expected = Series(np.nan, index=s.index)
    df = DataFrame([[1,5], [3, 2], [3,9], [-1,0]], columns=['A','B'])
    df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
    # Pairwise variants return a Panel with the frame's index/columns as axes.
    df_expected_panel = Panel(items=df.index, major_axis=df.columns, minor_axis=df.columns)
    functions = [lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
                 lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
                 lambda x: mom.rolling_max(x, window=10, min_periods=5),
                 lambda x: mom.rolling_min(x, window=10, min_periods=5),
                 lambda x: mom.rolling_sum(x, window=10, min_periods=5),
                 lambda x: mom.rolling_mean(x, window=10, min_periods=5),
                 lambda x: mom.rolling_std(x, window=10, min_periods=5),
                 lambda x: mom.rolling_var(x, window=10, min_periods=5),
                 lambda x: mom.rolling_skew(x, window=10, min_periods=5),
                 lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
                 lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
                 lambda x: mom.rolling_median(x, window=10, min_periods=5),
                 lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
                 lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
                 ]
    for f in functions:
        try:
            s_result = f(s)
            assert_series_equal(s_result, s_expected)
            df_result = f(df)
            assert_frame_equal(df_result, df_expected)
        except (ImportError):
            # scipy needed for rolling_window
            continue
    functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
                 lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                 # rolling_corr_pairwise is deprecated, so the following line should be deleted
                 # when rolling_corr_pairwise is removed.
                 lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
                 ]
    for f in functions:
        df_result_panel = f(df)
        assert_panel_equal(df_result_panel, df_expected_panel)
def test_moment_functions_zero_length(self):
    # GH 8056: moment functions applied to empty inputs must return an
    # empty result of the same shape, not raise.
    s = Series()
    s_expected = s
    df1 = DataFrame()
    df1_expected = df1
    df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
    # A frame with a column but no rows is a distinct empty case.
    df2 = DataFrame(columns=['a'])
    df2_expected = df2
    df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
    functions = [lambda x: mom.expanding_count(x),
                 lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
                 lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
                 lambda x: mom.expanding_max(x, min_periods=5),
                 lambda x: mom.expanding_min(x, min_periods=5),
                 lambda x: mom.expanding_sum(x, min_periods=5),
                 lambda x: mom.expanding_mean(x, min_periods=5),
                 lambda x: mom.expanding_std(x, min_periods=5),
                 lambda x: mom.expanding_var(x, min_periods=5),
                 lambda x: mom.expanding_skew(x, min_periods=5),
                 lambda x: mom.expanding_kurt(x, min_periods=5),
                 lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
                 lambda x: mom.expanding_median(x, min_periods=5),
                 lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
                 lambda x: mom.rolling_count(x, window=10),
                 lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
                 lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
                 lambda x: mom.rolling_max(x, window=10, min_periods=5),
                 lambda x: mom.rolling_min(x, window=10, min_periods=5),
                 lambda x: mom.rolling_sum(x, window=10, min_periods=5),
                 lambda x: mom.rolling_mean(x, window=10, min_periods=5),
                 lambda x: mom.rolling_std(x, window=10, min_periods=5),
                 lambda x: mom.rolling_var(x, window=10, min_periods=5),
                 lambda x: mom.rolling_skew(x, window=10, min_periods=5),
                 lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
                 lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
                 lambda x: mom.rolling_median(x, window=10, min_periods=5),
                 lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
                 lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
                 ]
    for f in functions:
        try:
            s_result = f(s)
            assert_series_equal(s_result, s_expected)
            df1_result = f(df1)
            assert_frame_equal(df1_result, df1_expected)
            df2_result = f(df2)
            assert_frame_equal(df2_result, df2_expected)
        except (ImportError):
            # scipy needed for rolling_window
            continue
    # Pairwise variants must produce empty Panels with matching axes.
    functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
                 lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
                 lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
                 lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                 # rolling_corr_pairwise is deprecated, so the following line should be deleted
                 # when rolling_corr_pairwise is removed.
                 lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
                 ]
    for f in functions:
        df1_result_panel = f(df1)
        assert_panel_equal(df1_result_panel, df1_expected_panel)
        df2_result_panel = f(df2)
        assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
    """Pairwise expanding_cov aligns frames of different lengths (GH 7512)."""
    df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=['A', 'B'])
    df1a = DataFrame([[1, 5], [3, 9]], index=[0, 2], columns=['A', 'B'])
    df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
    df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
    expected = DataFrame([[-3., -5.], [-6., -10.]],
                         index=['A', 'B'], columns=['X', 'Y'])
    # All four combinations (full/short x full/short) align identically,
    # so the frame at step 2 is the same in every case.
    for lhs in (df1, df1a):
        for rhs in (df2, df2a):
            result = mom.expanding_cov(lhs, rhs, pairwise=True)[2]
            assert_frame_equal(result, expected)
def test_expanding_corr_pairwise_diff_length(self):
    """Pairwise expanding_corr aligns frames of different lengths (GH 7512)."""
    df1 = DataFrame([[1, 2], [3, 2], [3, 4]], columns=['A', 'B'])
    df1a = DataFrame([[1, 2], [3, 4]], index=[0, 2], columns=['A', 'B'])
    df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
    df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
    expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
                         index=['A', 'B'], columns=['X', 'Y'])
    # All four combinations (full/short x full/short) align identically,
    # so the frame at step 2 is the same in every case.
    for lhs in (df1, df1a):
        for rhs in (df2, df2a):
            result = mom.expanding_corr(lhs, rhs, pairwise=True)[2]
            assert_frame_equal(result, expected)
def test_pairwise_stats_column_names_order(self):
    # GH 7738: pairwise/binary moment results must carry the input frames'
    # index and column orderings, whatever the column labels or dtypes are.
    # The frames below all hold the same data under differently typed,
    # duplicated, or mixed-type column labels.
    df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]),
            DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]),
            DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]),
            DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]),
            DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']),
            ]
    df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
    s = Series([1,1,3,8])
    # suppress warnings about incomparable objects, as we are deliberately testing with such column labels
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
        # DataFrame methods (which do not call _flex_binary_moment())
        for f in [lambda x: x.cov(),
                  lambda x: x.corr(),
                  ]:
            results = [f(df) for df in df1s]
            for (df, result) in zip(df1s, results):
                assert_index_equal(result.index, df.columns)
                assert_index_equal(result.columns, df.columns)
            # All label variants hold identical data, so values must agree.
            for i, result in enumerate(results):
                if i > 0:
                    self.assert_numpy_array_equivalent(result, results[0])
        # DataFrame with itself, pairwise=True
        for f in [lambda x: mom.expanding_cov(x, pairwise=True),
                  lambda x: mom.expanding_corr(x, pairwise=True),
                  lambda x: mom.rolling_cov(x, window=3, pairwise=True),
                  lambda x: mom.rolling_corr(x, window=3, pairwise=True),
                  lambda x: mom.ewmcov(x, com=3, pairwise=True),
                  lambda x: mom.ewmcorr(x, com=3, pairwise=True),
                  ]:
            results = [f(df) for df in df1s]
            for (df, result) in zip(df1s, results):
                # Pairwise results are Panels: time x columns x columns.
                assert_index_equal(result.items, df.index)
                assert_index_equal(result.major_axis, df.columns)
                assert_index_equal(result.minor_axis, df.columns)
            for i, result in enumerate(results):
                if i > 0:
                    self.assert_numpy_array_equivalent(result, results[0])
        # DataFrame with itself, pairwise=False
        for f in [lambda x: mom.expanding_cov(x, pairwise=False),
                  lambda x: mom.expanding_corr(x, pairwise=False),
                  lambda x: mom.rolling_cov(x, window=3, pairwise=False),
                  lambda x: mom.rolling_corr(x, window=3, pairwise=False),
                  lambda x: mom.ewmcov(x, com=3, pairwise=False),
                  lambda x: mom.ewmcorr(x, com=3, pairwise=False),
                  ]:
            results = [f(df) for df in df1s]
            for (df, result) in zip(df1s, results):
                assert_index_equal(result.index, df.index)
                assert_index_equal(result.columns, df.columns)
            for i, result in enumerate(results):
                if i > 0:
                    self.assert_numpy_array_equivalent(result, results[0])
        # DataFrame with another DataFrame, pairwise=True
        for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
                  lambda x, y: mom.expanding_corr(x, y, pairwise=True),
                  lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
                  lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
                  lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
                  lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
                  ]:
            results = [f(df, df2) for df in df1s]
            for (df, result) in zip(df1s, results):
                # Minor axis comes from the *other* frame's columns.
                assert_index_equal(result.items, df.index)
                assert_index_equal(result.major_axis, df.columns)
                assert_index_equal(result.minor_axis, df2.columns)
            for i, result in enumerate(results):
                if i > 0:
                    self.assert_numpy_array_equivalent(result, results[0])
        # DataFrame with another DataFrame, pairwise=False
        for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
                  lambda x, y: mom.expanding_corr(x, y, pairwise=False),
                  lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
                  lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
                  lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
                  lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
                  ]:
            # Non-unique columns cannot be aligned, so those frames must raise.
            results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
            for (df, result) in zip(df1s, results):
                if result is not None:
                    expected_index = df.index.union(df2.index)
                    expected_columns = df.columns.union(df2.columns)
                    assert_index_equal(result.index, expected_index)
                    assert_index_equal(result.columns, expected_columns)
                else:
                    tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
                    tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
        # DataFrame with a Series
        for f in [lambda x, y: mom.expanding_cov(x, y),
                  lambda x, y: mom.expanding_corr(x, y),
                  lambda x, y: mom.rolling_cov(x, y, window=3),
                  lambda x, y: mom.rolling_corr(x, y, window=3),
                  lambda x, y: mom.ewmcov(x, y, com=3),
                  lambda x, y: mom.ewmcorr(x, y, com=3),
                  ]:
            results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
            for (df, result) in zip(df1s, results):
                assert_index_equal(result.index, df.index)
                assert_index_equal(result.columns, df.columns)
            for i, result in enumerate(results):
                if i > 0:
                    self.assert_numpy_array_equivalent(result, results[0])
def test_rolling_skew_edge_cases(self):
    """Degenerate windows give NaN; a fixed input gives reference values."""
    all_nan = Series([np.NaN] * 5)
    # Constant input -> zero variance -> NaN everywhere.
    constant = Series([1] * 5)
    assert_series_equal(all_nan, mom.rolling_skew(constant, window=5))
    # Window too small for skewness -> NaN everywhere.
    random_data = Series(np.random.randn(5))
    assert_series_equal(all_nan, mom.rolling_skew(random_data, window=2))
    # Reference values for a fixed input:
    # [NaN, NaN, NaN, 0.177994, 1.548824]
    d = Series([-1.50837035, -0.1297039, 0.19501095,
                1.73508164, 0.41941401])
    expected = Series([np.NaN, np.NaN, np.NaN,
                       0.177994, 1.548824])
    assert_series_equal(expected, mom.rolling_skew(d, window=4))
def test_rolling_kurt_edge_cases(self):
    """Degenerate windows give NaN; a fixed input gives reference values."""
    all_nan = Series([np.NaN] * 5)
    # Constant input -> zero variance -> NaN everywhere.
    constant = Series([1] * 5)
    assert_series_equal(all_nan, mom.rolling_kurt(constant, window=5))
    # Window too small for kurtosis -> NaN everywhere.
    random_data = Series(np.random.randn(5))
    assert_series_equal(all_nan, mom.rolling_kurt(random_data, window=3))
    # Reference values for a fixed input:
    # [NaN, NaN, NaN, 1.224307, 2.671499]
    d = Series([-1.50837035, -0.1297039, 0.19501095,
                1.73508164, 0.41941401])
    expected = Series([np.NaN, np.NaN, np.NaN,
                       1.224307, 2.671499])
    assert_series_equal(expected, mom.rolling_kurt(d, window=4))
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
                             has_time_rule=True, preserve_nan=True):
    # Compare an expanding function against a whole-prefix static computation
    # and exercise its min_periods handling on raw ndarrays.
    # NOTE(review): has_time_rule is accepted for interface symmetry with the
    # rolling checks but is not used in this body — confirm against callers.
    result = func(self.arr)
    assert_almost_equal(result[10],
                        static_comp(self.arr[:11]))
    if preserve_nan:
        # NaNs in the input should surface at the same positions.
        assert(np.isnan(result[self._nan_locs]).all())
    arr = randn(50)
    if has_min_periods:
        result = func(arr, min_periods=30)
        assert(np.isnan(result[:29]).all())
        assert_almost_equal(result[-1], static_comp(arr[:50]))
        # min_periods is working correctly: first valid output at index 14.
        result = func(arr, min_periods=15)
        self.assertTrue(np.isnan(result[13]))
        self.assertFalse(np.isnan(result[14]))
        arr2 = randn(20)
        result = func(arr2, min_periods=5)
        self.assertTrue(isnull(result[3]))
        self.assertTrue(notnull(result[4]))
        # min_periods=0 must behave like min_periods=1
        result0 = func(arr, min_periods=0)
        result1 = func(arr, min_periods=1)
        assert_almost_equal(result0, result1)
    else:
        result = func(arr)
        assert_almost_equal(result[-1], static_comp(arr[:50]))
def _check_expanding_structures(self, func):
    """An expanding moment must preserve the input container type."""
    tm.assert_isinstance(func(self.series), Series)
    self.assertEqual(type(func(self.frame)), DataFrame)
def _check_expanding(self, func, static_comp, has_min_periods=True,
                     has_time_rule=True,
                     preserve_nan=True):
    # Run both the ndarray-level and the Series/DataFrame-level checks.
    self._check_expanding_ndarray(func, static_comp,
                                  has_min_periods=has_min_periods,
                                  has_time_rule=has_time_rule,
                                  preserve_nan=preserve_nan)
    self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
    """Replicate result expected in GH #6297: daily resampling before
    rolling_max keeps the maximum of same-day observations."""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # Add a second datapoint on Jan 3; its value (6) should win.
    indices.append(datetime(1975, 1, 3, 6, 0))
    series = Series(range(1, 7), index=indices)
    # Use floats instead of ints as values, then sort chronologically.
    series = series.map(float).sort_index()
    expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
                      index=[datetime(1975, 1, i, 0)
                             for i in range(1, 6)])
    result = mom.rolling_max(series, window=1, freq='D')
    assert_series_equal(expected, result)
def test_rolling_max_how_resample(self):
    """`how` controls the intra-period aggregation applied while resampling
    before rolling_max."""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # Three datapoints on the last day: 4, 10 and 20.
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values, then sort chronologically.
    series = series.map(float).sort_index()
    daily = [datetime(1975, 1, i, 0) for i in range(1, 6)]

    def check(last_day_value, **kwargs):
        # Only the last day's value depends on the aggregation used.
        expected = Series([0.0, 1.0, 2.0, 3.0, last_day_value], index=daily)
        result = mom.rolling_max(series, window=1, freq='D', **kwargs)
        assert_series_equal(expected, result)

    check(20.0)                              # default how is max
    check(10.0, how='median')                # median of (4, 10, 20)
    check((4.0 + 10.0 + 20.0) / 3.0, how='mean')
def test_rolling_min_how_resample(self):
    """Default `how` for rolling_min resampling is min."""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # Three datapoints on the last day: 4, 10 and 20.
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values, then sort chronologically.
    series = series.map(float).sort_index()
    # The smallest of (4, 10, 20) survives on the last day.
    expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
                      index=[datetime(1975, 1, i, 0)
                             for i in range(1, 6)])
    result = mom.rolling_min(series, window=1, freq='D')
    assert_series_equal(expected, result)
def test_rolling_median_how_resample(self):
    """Default `how` for rolling_median resampling is median."""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # Three datapoints on the last day: 4, 10 and 20.
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values, then sort chronologically.
    series = series.map(float).sort_index()
    # The median of (4, 10, 20) survives on the last day.
    expected = Series([0.0, 1.0, 2.0, 3.0, 10],
                      index=[datetime(1975, 1, i, 0)
                             for i in range(1, 6)])
    result = mom.rolling_median(series, window=1, freq='D')
    assert_series_equal(expected, result)
if __name__ == '__main__':
    # Run this test module under nose; stop at the first failure and drop
    # into pdb on errors/failures for interactive debugging.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
Silmathoron/nest-simulator | pynest/examples/spatial/hill_tononi_Vp.py | 6 | 38623 | # -*- coding: utf-8 -*-
#
# hill_tononi_Vp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NEST spatial example: A case-based tutorial
===========================================
:Author: Hans Ekkehard Plesser
:Institution: Norwegian University of Life Sciences
:Version: 0.4
:Date: 21 November 2012
:Copyright: The NEST Initiative (2004)
:License: Creative Commons Attribution License
**NOTE:** The network generated by this script does generate
dynamics in which the activity of the entire system, especially
Rp and Vp oscillates with approx 5 Hz. This is different from
the full model. Deviations are due to the different model type
and the elimination of a number of connections, with no changes
to the weights.
Introduction
-------------
This tutorial shows you how to implement a simplified version of the
Hill-Tononi model of the early visual pathway using NEST. The model
is described in the paper
S. L. Hill and G. Tononi.
Modeling Sleep and Wakefulness in the Thalamocortical System.
J Neurophysiology **93**:1671-1698 (2005).
Freely available via `doi 10.1152/jn.00915.2004
<http://dx.doi.org/10.1152/jn.00915.2004>`_.
We simplify the model somewhat both to keep this tutorial a bit
shorter, and because some details of the Hill-Tononi model are not
currently supported by NEST. Simplifications include:
1. We use the ``iaf_cond_alpha`` neuron model, which is
simpler than the Hill-Tononi model.
#. As the ``iaf_cond_alpha`` neuron model only supports two
synapses (labeled "ex" and "in"), we only include AMPA and
GABA_A synapses.
#. We ignore the secondary pathway (Ts, Rs, Vs), since it adds just
more of the same from a technical point of view.
#. Synaptic delays follow a Gaussian distribution in the HT
model. This implies actually a Gaussian distributions clipped at
some small, non-zero delay, since delays must be
positive. Currently, there is a bug in the module when using clipped
Gaussian distribution. We therefore draw delays from a
uniform distribution.
#. Some further adaptations are given at the appropriate locations in
the script.
This tutorial is divided in the following sections:
Philosophy_
Discusses the philosophy applied to model implementation in this
tutorial
Preparations_
Necessary steps to use NEST
`Configurable Parameters`_
Define adjustable network parameters
`Neuron Models`_
Define the neuron models needed by the network model
Populations_
Create Populations
`Synapse models`_
Define the synapse models used in the network model
Connections_
Create Connections
`Example simulation`_
Perform a small simulation for illustration. This
section also discusses the setup for recording.
Philosophy
-----------
A network model has two essential components: *populations* and
*projections*. We first use NEST's ``CopyModel()`` mechanism to
create specific models for all populations and subpopulations in
the network, and then create the populations using the
``Create()`` function.
We use a two-stage process to create the connections, mainly
because the same configurations are required for a number of
projections: we first define dictionaries specifying the
connections, then apply these dictionaries later.
The way in which we declare the network model here is an
example. You should not consider it the last word: we expect to see
a significant development in strategies and tools for network
descriptions in the future. The following contributions to CNS*09
seem particularly interesting
- Ralf Ansorg & Lars Schwabe. Declarative model description and
code generation for hybrid individual- and population-based
simulations of the early visual system (P57);
- Sharon Crook, R. Angus Silver, & Padraig Gleeson. Describing
and exchanging models of neurons and neuronal networks with
NeuroML (F1);
as well as the following paper which will appear in PLoS
Computational Biology shortly:
- Eilen Nordlie, Marc-Oliver Gewaltig, & Hans Ekkehard Plesser.
Towards reproducible descriptions of neuronal network models.
Preparations
-------------
Please make sure that your ``PYTHONPATH`` is set correctly, so
that Python can find the NEST Python module.
**Note:** By default, the script does not show any graphics.
Set ``SHOW_FIGURES`` to ``True`` to activate graphics.
"""
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt

SHOW_FIGURES = False

if SHOW_FIGURES:
    plt.ion()
else:
    # Keep a reference to the real show() and replace it with a no-op so
    # the script can run headless without opening any figure windows.
    plt_show = plt.show

    def nop(s=None, block=None):
        # Signature-compatible stand-in for plt.show().
        pass

    plt.show = nop
##############################################################################
# This tutorial gives a brief introduction to the ConnPlotter
# toolbox. It is by no means complete.
# Load pynest
import nest

# Make sure we start with a clean slate, even if we re-run the script
# in the same Python session.
nest.ResetKernel()

# Import math, we need Pi
import math
##############################################################################
# Configurable Parameters
# -----------------------
#
# Here we define those parameters that we take to be
# configurable. The choice of configurable parameters is obviously
# arbitrary, and in practice one would have far more configurable
# parameters. We restrict ourselves to:
#
# - Network size in neurons ``N``, each layer is ``N x N``.
# - Network size in subtended visual angle ``visSize``, in degree.
# - Temporal frequency of drifting grating input ``f_dg``, in Hz.
# - Spatial wavelength and direction of drifting grating input,
# ``lambda_dg`` and ``phi_dg``, in degree/radian.
# - Background firing rate of retinal nodes and modulation amplitude,
# ``retDC`` and ``retAC``, in Hz.
# - Simulation duration ``simtime``; actual simulation is split into
# intervals of ``sim_interval`` length, so that the network state
# can be visualized in those intervals. Times are in ms.
# - Periodic boundary conditions, ``edge_wrap``.
Params = {'N': 40,
'visSize': 8.0,
'f_dg': 2.0,
'lambda_dg': 2.0,
'phi_dg': 0.0,
'retDC': 30.0,
'retAC': 30.0,
'simtime': 100.0,
'sim_interval': 1.0,
'edge_wrap': True
}
##############################################################################
# Neuron Models
# -------------
#
# We declare models in two steps:
#
# 1. We define a dictionary specifying the NEST neuron model to use
#    as well as the parameters for that model.
# #. We create three copies of this dictionary with parameters
#    adjusted to the three model variants specified in Table~2 of
#    Hill & Tononi (2005) (cortical excitatory, cortical inhibitory,
#    thalamic)
#
# In addition, we declare the models for the stimulation and
# recording devices.
#
# The general neuron model
# ------------------------
#
# We use the ``iaf_cond_alpha`` neuron, which is an
# integrate-and-fire neuron with two conductance-based synapses which
# have alpha-function time course. Any input with positive weights
# will automatically be directed to the synapse labeled ``_ex``, any
# with negative weights to the synapse labeled ``_in``. We define
# **all** parameters explicitly here, so that no information is
# hidden in the model definition in NEST. ``V_m`` is the membrane
# potential to which the model neurons will be initialized.
# The model equations and parameters for the Hill-Tononi neuron model
# are given on pp. 1677f and Tables 2 and 3 in that paper. Note some
# peculiarities and adjustments:
#
# - Hill & Tononi specify their model in terms of the membrane time
#   constant, while the ``iaf_cond_alpha`` model is based on the
#   membrane capacitance. Interestingly, conductances are unitless in
#   the H&T model. We thus can use the time constant directly as
#   membrane capacitance.
# - The model includes sodium and potassium leak conductances. We
#   combine these into a single one as follows:
#
#   .. math::
#
#      -g_{NaL}(V-E_{Na}) - g_{KL}(V-E_K)
#      = -(g_{NaL}+g_{KL})
#      \left(V-\frac{g_{NaL}E_{Na}+g_{KL}E_K}{g_{NaL}+g_{KL}}\right)
#
# - We write the resulting expressions for g_L and E_L explicitly
#   below, to avoid errors in copying from our pocket calculator.
# - The paper gives a range of 1.0-1.85 for g_{KL}, we choose 1.5
#   here.
# - The Hill-Tononi model has no explicit reset or refractory
#   time. We arbitrarily set V_reset and t_ref.
# - The paper uses double exponential time courses for the synaptic
#   conductances, with separate time constants for the rising and
#   falling flanks. Alpha functions have only a single time
#   constant: we use twice the rising time constant given by Hill and
#   Tononi.
# - In the general model below, we use the values for the cortical
#   excitatory cells as defaults. Values will then be adapted below.
#
nest.CopyModel('iaf_cond_alpha', 'NeuronModel',
               params={'C_m': 16.0,
                       # E_L = (g_NaL*E_Na + g_KL*E_K) / (g_NaL + g_KL)
                       # with g_NaL = 0.2, E_Na = 30, g_KL = 1.5, E_K = -90
                       'E_L': (0.2 * 30.0 + 1.5 * -90.0) / (0.2 + 1.5),
                       'g_L': 0.2 + 1.5,
                       'E_ex': 0.0,
                       'E_in': -70.0,
                       'V_reset': -60.0,
                       'V_th': -51.0,
                       't_ref': 2.0,
                       'tau_syn_ex': 1.0,
                       'tau_syn_in': 2.0,
                       'I_e': 0.0,
                       'V_m': -70.0})
##############################################################################
# Adaptation of models for different populations
# ----------------------------------------------
#
# We must copy the `NeuronModel` dictionary explicitly, otherwise
# Python would just create a reference.
#
# Cortical excitatory cells
# .........................
# Parameters are the same as above, so we need not adapt anything
nest.CopyModel('NeuronModel', 'CtxExNeuron')

# Cortical inhibitory cells
# .........................
# Smaller capacitance, higher threshold, shorter refractory time.
nest.CopyModel('NeuronModel', 'CtxInNeuron',
               params={'C_m': 8.0,
                       'V_th': -53.0,
                       't_ref': 1.0})

# Thalamic cells
# ..............
# Like the inhibitory cells, but with a lower inhibitory reversal
# potential.
nest.CopyModel('NeuronModel', 'ThalamicNeuron',
               params={'C_m': 8.0,
                       'V_th': -53.0,
                       't_ref': 1.0,
                       'E_in': -80.0})
##############################################################################
# Input generating nodes
# ----------------------
#
# Input is generated by sinusoidally modulated Poisson generators,
# organized in a square layer of retina nodes. These nodes require a
# slightly more complicated initialization than all other elements of
# the network:
#
# - Average firing rate ``rate``, firing rate modulation depth ``amplitude``,
#   and temporal modulation frequency ``frequency`` are the same for all
#   retinal nodes and are set directly below.
# - The temporal phase ``phase`` of each node depends on its position in
#   the grating and can only be assigned after the retinal layer has
#   been created.
nest.CopyModel('sinusoidal_poisson_generator', 'RetinaNode',
               params={'amplitude': Params['retAC'],
                       'rate': Params['retDC'],
                       'frequency': Params['f_dg'],
                       'phase': 0.0,  # placeholder, set per node below
                       # each generator sends one shared spike train to
                       # all of its targets, not individual realizations
                       'individual_spike_trains': False})
##############################################################################
# Recording nodes
# ---------------
#
# We use the ``multimeter`` device for recording from the model
# neurons. At present, ``iaf_cond_alpha`` is one of few models
# supporting ``multimeter`` recording. Support for more models will
# be added soon; until then, you need to use ``voltmeter`` to record
# from other models.
#
# We configure the multimeter to record the membrane potential at
# certain intervals to memory only. We record the node ID of
# the recorded neurons, but not the time.
nest.CopyModel('multimeter', 'RecordingNode',
               params={'interval': Params['sim_interval'],
                       'record_from': ['V_m'],
                       'record_to': 'memory'})
##############################################################################
# Populations
# -----------
#
# We now create the neuron populations in the model. We define
# them in order from eye via thalamus to cortex.
#
# We first define a spatial grid defining common positions and
# parameters for all populations
layerGrid = nest.spatial.grid(shape=[Params['N'], Params['N']],
                              extent=[Params['visSize'], Params['visSize']],
                              edge_wrap=Params['edge_wrap'])

# We can pass this object to the ``positions`` argument in ``Create``
# to define the positions of the neurons.

##############################################################################
# Retina
# ------
retina = nest.Create('RetinaNode', positions=layerGrid)

# Now set phases of retinal oscillators; we create a Parameter
# which represents the phase based on the spatial properties of
# the neuron.  The phase advances linearly along the grating's drift
# direction ``phi_dg`` with spatial period ``lambda_dg``.
retina_phase = 360.0 / Params['lambda_dg'] * (math.cos(Params['phi_dg']) * nest.spatial.pos.x +
                                              math.sin(Params['phi_dg']) * nest.spatial.pos.y)
retina.phase = retina_phase
##############################################################################
# Thalamus
# --------
#
# We first introduce specific neuron models for the thalamic relay
# cells and interneurons. These have identical properties, but by
# treating them as different populations, we can address them specifically
# when building connections.
for model_name in ('TpRelay', 'TpInter'):
    nest.CopyModel('ThalamicNeuron', model_name)

# Now we can create the layers, one with relay cells,
# and one with interneurons:
TpRelay = nest.Create('TpRelay', positions=layerGrid)
TpInter = nest.Create('TpInter', positions=layerGrid)

##############################################################################
# Reticular nucleus
# -----------------
# Same neuron model again, under its own name for addressability.
nest.CopyModel('ThalamicNeuron', 'RpNeuron')
Rp = nest.Create('RpNeuron', positions=layerGrid)
##############################################################################
# Primary visual cortex
# ---------------------
#
# As for the thalamus, we register a dedicated model name for every
# cortical population so that connections can later address each one
# individually.  Pyramidal cells (excitatory) and interneurons
# (inhibitory) are distinguished per layer (L2-3, L4, L5-6).
# Vertically and horizontally tuned populations are kept in separate
# dictionaries of layers.
for layer in ('L23', 'L4', 'L56'):
    nest.CopyModel('CtxExNeuron', layer + 'pyr')
for layer in ('L23', 'L4', 'L56'):
    nest.CopyModel('CtxInNeuron', layer + 'in')

# Maps each population model to its number of sublayers: pyramidal
# populations come in two copies each, interneuron populations in one.
name_dict = {'L23pyr': 2, 'L23in': 1,
             'L4pyr': 2, 'L4in': 1,
             'L56pyr': 2, 'L56in': 1}

# Create the populations; suffixes h and v indicate horizontal and
# vertical tuning.  Keys have the form '<model>_<sublayer index>'.
Vp_h_layers = {}
Vp_v_layers = {}
for layer_name, num_layers in name_dict.items():
    for i in range(num_layers):
        key = '{}_{}'.format(layer_name, i)
        Vp_h_layers[key] = nest.Create(layer_name, positions=layerGrid)
        Vp_v_layers[key] = nest.Create(layer_name, positions=layerGrid)
##############################################################################
# Collect all populations
# -----------------------
#
# For reference purposes, e.g., printing, we collect all populations
# in a tuple:
populations = (retina, TpRelay, TpInter, Rp) + tuple(Vp_h_layers.values()) + tuple(Vp_v_layers.values())

##############################################################################
# Inspection
# ----------
#
# We can now look at the network using `PrintNodes`:
nest.PrintNodes()

# We can also try to plot a single layer in a network. All layers have
# equal positions of the nodes.
nest.PlotLayer(Rp)
plt.title('Layer Rp')
##############################################################################
# Synapse models
# --------------
#
# Actual synapse dynamics, e.g., properties such as the synaptic time
# course, time constants, reversal potentials, are properties of
# neuron models in NEST and we set them in section `Neuron models`_
# above. When we refer to *synapse models* in NEST, we actually mean
# connectors which store information about connection weights and
# delays, as well as port numbers at the target neuron (``rport``)
# and implement synaptic plasticity. The latter two aspects are not
# relevant here.
#
# We just use NEST's ``static_synapse`` connector but copy it to
# synapse models ``AMPA`` and ``GABA_A`` for the sake of
# explicitness. Weights and delays are set as needed in section
# `Connections`_ below, as they are different from projection to
# projection. De facto, the sign of the synaptic weight decides
# whether input via a connection is handled by the ``_ex`` or the
# ``_in`` synapse.
nest.CopyModel('static_synapse', 'AMPA')
nest.CopyModel('static_synapse', 'GABA_A')
##############################################################################
# Connections
# -----------
#
# Building connections is the most complex part of network
# construction. Connections are specified in Table 1 in the
# Hill-Tononi paper. As pointed out above, we only consider AMPA and
# GABA_A synapses here. Adding other synapses is tedious work, but
# should pose no new principal challenges. We also use a uniform
# instead of a Gaussian distribution for the weights.
#
# The model has two identical primary visual cortex populations,
# ``Vp_v`` and ``Vp_h``, tuned to vertical and horizontal gratings,
# respectively. The *only* difference in the connection patterns
# between the two populations is that the thalamocortical input to layers
# L4 and L5-6 is from a population of 8x2 and 2x8 grid locations,
# respectively. Furthermore, inhibitory connections in cortex go to
# the opposing orientation population as well as to their own.
#
# To save us a lot of code doubling, we thus define properties
# dictionaries for all connections first and then use these to connect
# both populations. We follow the subdivision of connections as in
# the Hill & Tononi paper.
#
# TODO: Rewrite this note.
# **Note:** Hill & Tononi state that their model spans 8 degrees of
# visual angle and stimuli are specified according to this. On the
# other hand, all connection patterns are defined in terms of cell
# grid positions. Since NEST defines connection patterns in terms
# of the extent given in degrees, we need to apply the following
# scaling factor to all lengths in connections:
dpc = Params['visSize'] / (Params['N'] - 1)  # degrees per cell

# We will collect all same-orientation cortico-cortical connections in
ccConnections = []
# the cross-orientation cortico-cortical connections in
ccxConnections = []
# and all cortico-thalamic connections in
ctConnections = []
##############################################################################
# Horizontal intralaminar
# -----------------------
#
# *Note:* "Horizontal" means "within the same cortical layer" in this
# case.
#
# We first define dictionaries with the (most) common properties for
# horizontal intralaminar connections. We then create copies in which
# we adapt those values that need adapting.
horIntra_conn_spec = {"rule": "pairwise_bernoulli",
                      "mask": {"circular": {"radius": 12.0 * dpc}},
                      "p": 0.05*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
horIntra_syn_spec = {"synapse_model": "AMPA",
                     "weight": 1.0,
                     "delay": nest.random.uniform(min=1.75, max=2.25)}

# In a loop, we run over the sources and targets and the corresponding
# dictionaries with values that need updating.  The L4 projections use
# a smaller mask radius than the other layers.
for conn in [{"sources": "L23pyr", "targets": "L23pyr", "conn_spec": {}},
             {"sources": "L23pyr", "targets": "L23in", "conn_spec": {}},
             {"sources": "L4pyr", "targets": "L4pyr", "conn_spec": {"mask": {"circular": {"radius": 7.0 * dpc}}}},
             {"sources": "L4pyr", "targets": "L4in", "conn_spec": {"mask": {"circular": {"radius": 7.0 * dpc}}}},
             {"sources": "L56pyr", "targets": "L56pyr", "conn_spec": {}},
             {"sources": "L56pyr", "targets": "L56in", "conn_spec": {}}]:
    conn_spec = horIntra_conn_spec.copy()
    conn_spec.update(conn['conn_spec'])
    ccConnections.append([conn['sources'], conn['targets'], conn_spec, horIntra_syn_spec])
##############################################################################
# Vertical intralaminar
# -----------------------
# *Note:* "Vertical" means "between cortical layers" in this
# case.
#
# We proceed as above.
verIntra_conn_spec = {"rule": "pairwise_bernoulli",
                      "mask": {"circular": {"radius": 2.0 * dpc}},
                      "p": nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
verIntra_syn_spec = {"synapse_model": "AMPA",
                     "weight": 2.0,
                     "delay": nest.random.uniform(min=1.75, max=2.25)}

# NOTE(review): the second entry targets "L23in", which is a within-layer
# projection in a "between layers" section -- verify against Table 1 of
# Hill & Tononi whether "L56in" was intended.
for conn in [{"sources": "L23pyr", "targets": "L56pyr",
              "syn_spec": {"weight": 1.0}},
             {"sources": "L23pyr", "targets": "L23in",
              "syn_spec": {"weight": 1.0}},
             {"sources": "L4pyr", "targets": "L23pyr", "syn_spec": {}},
             {"sources": "L4pyr", "targets": "L23in", "syn_spec": {}},
             {"sources": "L56pyr", "targets": "L23pyr", "syn_spec": {}},
             {"sources": "L56pyr", "targets": "L23in", "syn_spec": {}},
             {"sources": "L56pyr", "targets": "L4pyr", "syn_spec": {}},
             {"sources": "L56pyr", "targets": "L4in", "syn_spec": {}}]:
    syn_spec = verIntra_syn_spec.copy()
    syn_spec.update(conn['syn_spec'])
    ccConnections.append([conn['sources'], conn['targets'], verIntra_conn_spec, syn_spec])
##############################################################################
# Intracortical inhibitory
# ------------------------
#
# We proceed as above, with the following difference: each connection
# is added to both the same-orientation and the cross-orientation list of
# connections.
#
# **Note:** Weights increased from -1.0 to -2.0, to make up for missing GabaB
#
# Note that we have to specify the **weight with negative sign** to make
# the connections inhibitory.
intraInh_conn_spec = {"rule": "pairwise_bernoulli",
                      "mask": {"circular": {"radius": 7.0 * dpc}},
                      "p": 0.25*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
intraInh_syn_spec = {"synapse_model": "GABA_A",
                     "weight": -2.0,
                     "delay": nest.random.uniform(min=1.75, max=2.25)}
for conn in [{"sources": "L23in", "targets": "L23pyr", "conn_spec": {}},
             {"sources": "L23in", "targets": "L23in", "conn_spec": {}},
             {"sources": "L4in", "targets": "L4pyr", "conn_spec": {}},
             {"sources": "L4in", "targets": "L4in", "conn_spec": {}},
             {"sources": "L56in", "targets": "L56pyr", "conn_spec": {}},
             {"sources": "L56in", "targets": "L56in", "conn_spec": {}}]:
    conn_spec = intraInh_conn_spec.copy()
    conn_spec.update(conn['conn_spec'])
    # The identical projection is registered for both the same- and the
    # cross-orientation connection lists.
    ccConnections.append([conn['sources'], conn['targets'], conn_spec, intraInh_syn_spec])
    ccxConnections.append([conn['sources'], conn['targets'], conn_spec, intraInh_syn_spec])
##############################################################################
# Corticothalamic
# ---------------
# We proceed as above: an excitatory AMPA projection from L5-6
# pyramidal cells to the thalamus, with a long delay (7.5-8.5 ms).
corThal_conn_spec = {"rule": "pairwise_bernoulli",
                     "mask": {"circular": {"radius": 5.0 * dpc}},
                     "p": 0.5*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
corThal_syn_spec = {"synapse_model": "AMPA",
                    "weight": 1.0,
                    "delay": nest.random.uniform(min=7.5, max=8.5)}
for conn in [{"sources": "L56pyr", "conn_spec": {}}]:
    # BUG FIX: this loop previously copied ``intraInh_conn_spec`` and
    # ``intraInh_syn_spec``, so corticothalamic projections were built
    # with the inhibitory intracortical parameters (GABA_A, weight -2.0,
    # short delay) and the ``corThal_*`` dictionaries defined above were
    # never used.  We now copy the corticothalamic specs.
    conn_spec = corThal_conn_spec.copy()
    conn_spec.update(conn['conn_spec'])
    syn_spec = corThal_syn_spec.copy()
    ctConnections.append([conn['sources'], conn_spec, syn_spec])
##############################################################################
# Corticoreticular
# ----------------
#
# In this case, there is only a single connection, so we define the
# dictionaries directly; it is very similar to corThal, and to show that,
# we copy first, then update.  Only the weight differs.
corRet = corThal_conn_spec.copy()
corRet_syn_spec = corThal_syn_spec.copy()
corRet_syn_spec.update({"weight": 2.5})
##############################################################################
# Build all connections beginning in cortex
# -----------------------------------------

def _connect_cortico_cortical(conn_list, layer_pairs):
    """Apply each (source, target, conn_spec, syn_spec) entry of
    ``conn_list`` to every sublayer combination, once per
    (source_dict, target_dict) pair in ``layer_pairs``."""
    for source, target, conn_spec, syn_spec in conn_list:
        for src_i in range(name_dict[source]):
            for tgt_i in range(name_dict[target]):
                src_key = '{}_{}'.format(source, src_i)
                tgt_key = '{}_{}'.format(target, tgt_i)
                for src_layers, tgt_layers in layer_pairs:
                    nest.Connect(src_layers[src_key], tgt_layers[tgt_key],
                                 conn_spec, syn_spec)

# Cortico-cortical, same orientation: h -> h and v -> v.
print("Connecting: cortico-cortical, same orientation")
_connect_cortico_cortical(ccConnections,
                          [(Vp_h_layers, Vp_h_layers),
                           (Vp_v_layers, Vp_v_layers)])

# Cortico-cortical, cross-orientation: h -> v and v -> h.
print("Connecting: cortico-cortical, other orientation")
_connect_cortico_cortical(ccxConnections,
                          [(Vp_h_layers, Vp_v_layers),
                           (Vp_v_layers, Vp_h_layers)])

# Cortico-thalamic connections: both orientations project to the relay
# cells and to the interneurons.
print("Connecting: cortico-thalamic")
for source, conn_spec, syn_spec in ctConnections:
    for src_i in range(name_dict[source]):
        src_key = '{}_{}'.format(source, src_i)
        for src_layers in (Vp_h_layers, Vp_v_layers):
            for thal_target in (TpRelay, TpInter):
                nest.Connect(src_layers[src_key], thal_target,
                             conn_spec, syn_spec)

# Cortico-reticular: L5-6 pyramidal cells of both orientations project
# to the reticular nucleus.
for src_i in range(name_dict['L56pyr']):
    src_key = 'L56pyr_{}'.format(src_i)
    for src_layers in (Vp_h_layers, Vp_v_layers):
        nest.Connect(src_layers[src_key], Rp, corRet, corRet_syn_spec)
##############################################################################
# Thalamo-cortical connections
# ----------------------------
#
# **Note:** According to the text on p. 1674, bottom right, of the Hill &
# Tononi paper, thalamocortical connections are created by selecting from
# the thalamic population for each L4 pyramidal cell. We must therefore
# specify that we want to select from the source neurons.
#
# We first handle the rectangular thalamocortical connections.
thalCorRect_conn_spec = {"rule": "pairwise_bernoulli",
                         "use_on_source": True}
thalCorRect_syn_spec = {"synapse_model": "AMPA",
                        "weight": 5.0,
                        "delay": nest.random.uniform(min=2.75, max=3.25)}
print("Connecting: thalamo-cortical")

# Horizontally tuned populations receive input from a mask elongated
# along x (8 x 2 grid locations, cf. the Connections section above).
thalCorRect_conn_spec.update(
    {"mask": {"rectangular": {"lower_left": [-4.0 * dpc, -1.0 * dpc],
                              "upper_right": [4.0 * dpc, 1.0 * dpc]}}})
for conn in [{"targets": "L4pyr", "conn_spec": {"p": 0.5}},
             {"targets": "L56pyr", "conn_spec": {"p": 0.3}}]:
    conn_spec = thalCorRect_conn_spec.copy()
    conn_spec.update(conn['conn_spec'])
    for trg_i in range(name_dict[conn['targets']]):
        target_name = '{}_{}'.format(conn['targets'], trg_i)
        nest.Connect(
            TpRelay, Vp_h_layers[target_name], conn_spec, thalCorRect_syn_spec)

# Vertically tuned populations use the transposed mask (2 x 8).  Note
# that this update() deliberately overwrites the mask set above.
thalCorRect_conn_spec.update(
    {"mask": {"rectangular": {"lower_left": [-1.0 * dpc, -4.0 * dpc],
                              "upper_right": [1.0 * dpc, 4.0 * dpc]}}})
for conn in [{"targets": "L4pyr", "conn_spec": {"p": 0.5}},
             {"targets": "L56pyr", "conn_spec": {"p": 0.3}}]:
    conn_spec = thalCorRect_conn_spec.copy()
    conn_spec.update(conn['conn_spec'])
    for trg_i in range(name_dict[conn['targets']]):
        target_name = '{}_{}'.format(conn['targets'], trg_i)
        nest.Connect(
            TpRelay, Vp_v_layers[target_name], conn_spec, thalCorRect_syn_spec)

# Diffuse connections: circular mask with distance-dependent
# probability, identical for both orientations.
thalCorDiff_conn_spec = {"rule": "pairwise_bernoulli",
                         "use_on_source": True,
                         "mask": {"circular": {"radius": 5.0 * dpc}},
                         "p": 0.1*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5*dpc)}
thalCorDiff_syn_spec = {"synapse_model": "AMPA",
                        "weight": 5.0,
                        "delay": nest.random.uniform(min=2.75, max=3.25)}
for conn in [{"targets": "L4pyr"},
             {"targets": "L56pyr"}]:
    for trg_i in range(name_dict[conn['targets']]):
        target_name = '{}_{}'.format(conn['targets'], trg_i)
        nest.Connect(TpRelay, Vp_h_layers[target_name], thalCorDiff_conn_spec, thalCorDiff_syn_spec)
        nest.Connect(TpRelay, Vp_v_layers[target_name], thalCorDiff_conn_spec, thalCorDiff_syn_spec)
##############################################################################
# Thalamic connections
# --------------------
#
# Connections inside thalamus, including Rp.
#
# *Note:* In Hill & Tononi, the inhibition between Rp cells is mediated by
# GABA_B receptors. We use GABA_A receptors here to provide some
# self-dampening of Rp.
#
# **Note 1:** The following code had a serious bug in v. 0.1: During the first
# iteration of the loop, "synapse_model" and "weights" were set to "AMPA" and
# "0.1", respectively and remained unchanged, so that all connections were
# created as excitatory connections, even though they should have been
# inhibitory. We now specify synapse_model and weight explicitly for each
# connection to avoid this.
#
# **Note 2:** The following code also had a serious bug in v. 0.4: In the
# loop the connection dictionary would be updated directly, i.e. without
# making a copy. This lead to the entry ``'sources': 'TpInter'`` being
# left in the dictionary when connecting with ``Rp`` sources. Therefore no
# connections for the connections with ``Rp`` as source would be created
# here.
thal_conn_spec = {"rule": "pairwise_bernoulli"}
thal_syn_spec = {"delay": nest.random.uniform(min=1.75, max=2.25)}
print("Connecting: intra-thalamic")

# Each tuple holds (source, target, conn_spec updates, syn_spec
# updates).  The excitatory relay -> Rp projection uses AMPA; all other
# projections are GABA_A and therefore carry negative weights.
for src, tgt, conn, syn in [(TpRelay, Rp,
                             {"mask": {"circular": {"radius": 2.0 * dpc}},
                              "p": nest.spatial_distributions.gaussian(
                                  nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "AMPA",
                              "weight": 2.0}),
                            (TpInter, TpRelay,
                             {"mask": {"circular": {"radius": 2.0 * dpc}},
                              "p": 0.25*nest.spatial_distributions.gaussian(
                                  nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "GABA_A",
                              "weight": -1.0}),
                            (TpInter, TpInter,
                             {"mask": {"circular": {"radius": 2.0 * dpc}},
                              "p": 0.25*nest.spatial_distributions.gaussian(
                                  nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "GABA_A", "weight": -1.0}),
                            (Rp, TpRelay, {"mask": {"circular": {"radius": 12.0 * dpc}},
                                           "p": 0.15*nest.spatial_distributions.gaussian(
                                               nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "GABA_A", "weight": -1.0}),
                            (Rp, TpInter, {"mask": {"circular": {"radius": 12.0 * dpc}},
                                           "p": 0.15*nest.spatial_distributions.gaussian(
                                               nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "GABA_A", "weight": -1.0}),
                            (Rp, Rp, {"mask": {"circular": {"radius": 12.0 * dpc}},
                                      "p": 0.5*nest.spatial_distributions.gaussian(
                                          nest.spatial.distance, std=7.5*dpc)},
                             {"synapse_model": "GABA_A", "weight": -1.0})
                            ]:
    # Copy the shared base dictionaries so per-projection updates do not
    # leak into the next iteration (see Notes 1 and 2 above).
    conn_spec = thal_conn_spec.copy()
    conn_spec.update(conn)
    syn_spec = thal_syn_spec.copy()
    syn_spec.update(syn)
    nest.Connect(src, tgt, conn_spec, syn_spec)
##############################################################################
# Thalamic input
# --------------
#
# Input to the thalamus from the retina.
#
# **Note:** Hill & Tononi specify a delay of 0 ms for this connection.
# We use 1 ms here.
retThal_conn_spec = {"rule": "pairwise_bernoulli",
                     "mask": {"circular": {"radius": 1.0 * dpc}},
                     "p": 0.75*nest.spatial_distributions.gaussian(nest.spatial.distance, std=2.5*dpc)}
retThal_syn_spec = {"weight": 10.0,
                    "delay": 1.0,
                    "synapse_model": "AMPA"}
print("Connecting: retino-thalamic")
nest.Connect(retina, TpRelay, retThal_conn_spec, retThal_syn_spec)
nest.Connect(retina, TpInter, retThal_conn_spec, retThal_syn_spec)
##############################################################################
# Checks on connections
# ---------------------
#
# As a very simple check on the connections created, we inspect
# the connections from the central node of various layers.

# Connections from Retina to TpRelay
retina_ctr_node_id = nest.FindCenterElement(retina)
retina_ctr_index = retina.index(retina_ctr_node_id.global_id)
# ``conns`` is kept only for interactive inspection; it is not used below.
conns = nest.GetConnections(retina[retina_ctr_index], TpRelay)
nest.PlotTargets(retina[retina_ctr_index], TpRelay, 'AMPA')
plt.title('Connections Retina -> TpRelay')

# Connections from TpRelay to L4pyr in Vp (horizontally tuned)
TpRelay_ctr_node_id = nest.FindCenterElement(TpRelay)
TpRelay_ctr_index = TpRelay.index(TpRelay_ctr_node_id.global_id)
nest.PlotTargets(TpRelay[TpRelay_ctr_index], Vp_h_layers['L4pyr_0'], 'AMPA')
plt.title('Connections TpRelay -> Vp(h) L4pyr')

# Connections from TpRelay to L4pyr in Vp (vertically tuned)
nest.PlotTargets(TpRelay[TpRelay_ctr_index], Vp_v_layers['L4pyr_0'], 'AMPA')
plt.title('Connections TpRelay -> Vp(v) L4pyr')

# Block until the figures are closed before we continue.
plt.show(block=True)
##############################################################################
# Recording devices
# -----------------
#
# This recording device setup is a bit makeshift. For each population
# we want to record from, we create one ``multimeter``, then select
# all nodes of the right model from the target population and
# connect. ``loc`` is the subplot location for the layer.
print("Connecting: Recording devices")
recorders = {}
for name, loc, population in [('TpRelay', 0, TpRelay),
                              ('Rp', 1, Rp),
                              ('Vp_v L4pyr 1', 2, Vp_v_layers['L4pyr_0']),
                              ('Vp_v L4pyr 2', 3, Vp_v_layers['L4pyr_1']),
                              ('Vp_h L4pyr 1', 4, Vp_h_layers['L4pyr_0']),
                              ('Vp_h L4pyr 2', 5, Vp_h_layers['L4pyr_1'])]:
    recorders[name] = (nest.Create('RecordingNode'), loc)
    # one recorder to all targets
    nest.Connect(recorders[name][0], population)
##############################################################################
# Example simulation
# --------------------
#
# This simulation is set up to create a step-wise visualization of
# the membrane potential. To do so, we simulate ``sim_interval``
# milliseconds at a time, then read out data from the multimeters,
# clear data from the multimeters and plot the data as pseudocolor
# plots.

# show time during simulation
nest.SetKernelStatus({'print_time': True})

# lower and upper limits for color scale, for each of the
# populations recorded.
vmn = [-80, -80, -80, -80, -80, -80]
vmx = [-50, -50, -50, -50, -50, -50]

# NOTE(review): one extra interval is simulated here before the figure
# is set up, presumably so that the multimeters hold data on the first
# pass through the loop below -- confirm; total simulated time is
# simtime + sim_interval.
nest.Simulate(Params['sim_interval'])

# Set up the figure, assume six recorders.
fig, axes = plt.subplots(2, 3)
images = []
for i, ax in enumerate(axes.flat):
    # We initialize with an empty image
    images.append(ax.imshow([[0.]], aspect='equal', interpolation='nearest',
                            extent=(0, Params['N'] + 1, 0, Params['N'] + 1),
                            vmin=vmn[i], vmax=vmx[i], cmap='plasma'))
    fig.colorbar(images[-1], ax=ax)

# loop over simulation intervals
for t in np.arange(0, Params['simtime'], Params['sim_interval']):
    # do the simulation
    nest.Simulate(Params['sim_interval'])
    # now plot data from each recorder in turn
    for name, rec_item in recorders.items():
        recorder, subplot_pos = rec_item
        ax = axes.flat[subplot_pos]
        im = images[subplot_pos]
        d = recorder.get('events', 'V_m')
        # clear data from multimeter
        recorder.n_events = 0
        # update image data and title
        im.set_data(np.reshape(d, (Params['N'], Params['N'])))
        ax.set_title(name + ', t = %6.1f ms' % nest.GetKernelStatus()['time'])
    # We need to pause because drawing of the figure happens while the main code is sleeping
    plt.pause(0.0001)

# just for some information at the end
pprint(nest.GetKernelStatus())
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py | 17 | 54641 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
# Ten small samples of near-unity measurements; presumably shared
# fixture data for the variance-homogeneity tests (Bartlett / Levene /
# Fligner) further down in this module -- the uses are outside this view.
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
    """Tests for stats.mvsdist (posterior distributions of mean/var/std)."""

    def test_basic(self):
        sample = [6, 9, 12, 7, 8, 8, 13]
        mean, var, std = stats.mvsdist(sample)
        assert_almost_equal(mean.mean(), 9.0)
        assert_allclose(mean.interval(0.9),
                        (7.1036502226125329, 10.896349777387467),
                        rtol=1e-14)
        assert_almost_equal(var.mean(), 10.0)
        assert_allclose(var.interval(0.9),
                        (3.1767242068607087, 24.45910381334018),
                        rtol=1e-09)
        assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
        assert_allclose(std.interval(0.9),
                        (1.7823367265645145, 4.9456146050146312),
                        rtol=1e-14)

    def test_empty_input(self):
        assert_raises(ValueError, stats.mvsdist, [])

    def test_bad_arg(self):
        # Fewer than two data points must raise ValueError.
        assert_raises(ValueError, stats.mvsdist, [1])

    def test_warns(self):
        # Regression test for gh-5270: evaluating the means must not
        # emit spurious divide-by-zero RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            [dist.mean() for dist in stats.mvsdist([1, 2, 3])]
            [dist.mean() for dist in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
    # Tests for stats.shapiro (Shapiro-Wilk test of normality).
    # shapiro() returns the test statistic W and its p-value.

    def test_basic(self):
        x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
              4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
              0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
        w, pw = stats.shapiro(x1)
        assert_almost_equal(w, 0.90047299861907959, 6)
        assert_almost_equal(pw, 0.042089745402336121, 6)

        x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
              3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
              0.08, 3.67, 2.81, 3.49]
        w, pw = stats.shapiro(x2)
        assert_almost_equal(w, 0.9590270, 6)
        assert_almost_equal(pw, 0.52460, 3)

        # Verified against R
        np.random.seed(12345678)
        x3 = stats.norm.rvs(loc=5, scale=3, size=100)
        w, pw = stats.shapiro(x3)
        assert_almost_equal(w, 0.9772805571556091, decimal=6)
        assert_almost_equal(pw, 0.08144091814756393, decimal=3)

        # Extracted from original paper
        x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
              0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
              3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
        W_expected = 0.83467
        p_expected = 0.000914
        w, pw = stats.shapiro(x4)
        assert_almost_equal(w, W_expected, decimal=4)
        assert_almost_equal(pw, p_expected, decimal=5)

    def test_2d(self):
        # Same data as test_basic, split into two rows; the expected
        # statistic/p-value must match the 1-D results.
        x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
              4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
              0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
        w, pw = stats.shapiro(x1)
        assert_almost_equal(w, 0.90047299861907959, 6)
        assert_almost_equal(pw, 0.042089745402336121, 6)

        x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
              3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
              0.08, 3.67, 2.81, 3.49]]
        w, pw = stats.shapiro(x2)
        assert_almost_equal(w, 0.9590270, 6)
        assert_almost_equal(pw, 0.52460, 3)

    def test_empty_input(self):
        # Empty input (even nested-empty) must be rejected.
        assert_raises(ValueError, stats.shapiro, [])
        assert_raises(ValueError, stats.shapiro, [[], [], []])

    def test_not_enough_values(self):
        # The test requires at least 3 observations.
        assert_raises(ValueError, stats.shapiro, [1, 2])
        assert_raises(ValueError, stats.shapiro, [[], [2]])

    def test_bad_arg(self):
        # Length of x is less than 3.
        x = [1]
        assert_raises(ValueError, stats.shapiro, x)

    def test_nan_input(self):
        # With a NaN in the data, the statistic is nan and p ~ 1.0
        # (documented by the assertions below).
        x = np.arange(10.)
        x[9] = np.nan

        w, pw = stats.shapiro(x)
        assert_equal(w, np.nan)
        assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
    # Tests for stats.anderson (Anderson-Darling test for data coming from a
    # particular distribution).  anderson() returns
    # (statistic, critical_values, significance_levels).

    def test_normal(self):
        rs = RandomState(1234567890)
        x1 = rs.standard_exponential(size=50)
        x2 = rs.standard_normal(size=50)
        # Exponential data: statistic should exceed all but the loosest
        # critical values (normality rejected)...
        A, crit, sig = stats.anderson(x1)
        assert_array_less(crit[:-1], A)
        # ...normal data: statistic should be below the tightest critical
        # values (normality not rejected).
        A, crit, sig = stats.anderson(x2)
        assert_array_less(A, crit[-2:])

        v = np.ones(10)
        v[0] = 0
        A, crit, sig = stats.anderson(v)
        # The expected statistic 3.208057 was computed independently of scipy.
        # For example, in R:
        #   > library(nortest)
        #   > v <- rep(1, 10)
        #   > v[1] <- 0
        #   > result <- ad.test(v)
        #   > result$statistic
        #          A
        #   3.208057
        assert_allclose(A, 3.208057)

    def test_expon(self):
        rs = RandomState(1234567890)
        x1 = rs.standard_exponential(size=50)
        x2 = rs.standard_normal(size=50)
        A, crit, sig = stats.anderson(x1, 'expon')
        assert_array_less(A, crit[-2:])
        # Normal data tested against 'expon' generates numerical warnings;
        # suppress them, and restore the error state afterwards.
        olderr = np.seterr(all='ignore')
        try:
            A, crit, sig = stats.anderson(x2, 'expon')
        finally:
            np.seterr(**olderr)

        assert_(A > crit[-1])

    def test_gumbel(self):
        # Regression test for gh-6306.  Before that issue was fixed,
        # this case would return a2=inf.
        v = np.ones(100)
        v[0] = 0.0
        a2, crit, sig = stats.anderson(v, 'gumbel')
        # A brief reimplementation of the calculation of the statistic.
        n = len(v)
        xbar, s = stats.gumbel_l.fit(v)
        logcdf = stats.gumbel_l.logcdf(v, xbar, s)
        logsf = stats.gumbel_l.logsf(v, xbar, s)
        i = np.arange(1, n+1)
        expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))

        assert_allclose(a2, expected_a2)

    def test_bad_arg(self):
        # Unknown distribution name must raise.
        assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')

    def test_result_attributes(self):
        # The result is a named tuple with these three fields.
        rs = RandomState(1234567890)
        x = rs.standard_exponential(size=50)
        res = stats.anderson(x)
        attributes = ('statistic', 'critical_values', 'significance_level')
        check_named_results(res, attributes)

    def test_gumbel_l(self):
        # gh-2592, gh-6337
        # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
        rs = RandomState(1234567890)
        x = rs.gumbel(size=100)
        A1, crit1, sig1 = stats.anderson(x, 'gumbel')
        A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')

        # 'gumbel' is an alias for 'gumbel_l'; statistics must match.
        assert_allclose(A2, A1)

    def test_gumbel_r(self):
        # gh-2592, gh-6337
        # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
        rs = RandomState(1234567890)
        x1 = rs.gumbel(size=100)
        x2 = np.ones(100)
        A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
        A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')

        assert_array_less(A1, crit1[-2:])
        assert_(A2 > crit2[-1])
class TestAndersonKSamp(TestCase):
    # Tests for stats.anderson_ksamp (k-sample Anderson-Darling test).
    # anderson_ksamp() returns (statistic, critical_values, p-value) and
    # warns when the p-value is only approximate (capped/floored).

    def test_example1a(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass a mixture of lists and arrays
        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        # This data set triggers the approximate-p-value warning.
        assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
                     midrank=False)

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)

        assert_almost_equal(Tk, 4.449, 3)
        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                  tm, 4)
        assert_almost_equal(p, 0.0021, 4)

    def test_example1b(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass arrays
        t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            # Same data as test_example1a, but with the midrank (continuous
            # vs. discrete) variant of the statistic.
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)

        assert_almost_equal(Tk, 4.480, 3)
        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
                                  tm, 4)
        assert_almost_equal(p, 0.0020, 4)

    def test_example2a(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        # Pass lists instead of arrays
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=False)

        assert_almost_equal(Tk, 3.288, 3)
        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                  tm, 4)
        assert_almost_equal(p, 0.0041, 4)

    def test_example2b(self):
        # Example data taken from an earlier technical report of
        # Scholz and Stephens
        # Same fourteen samples as test_example2a, with midrank=True.
        t1 = [194, 15, 41, 29, 33, 181]
        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
              118, 25, 156, 310, 76, 26, 44, 23, 62]
        t5 = [130, 208, 70, 101, 208]
        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
               22, 139, 210, 97, 30, 23, 13, 14]
        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
        t12 = [50, 254, 5, 283, 35, 12]
        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
               61, 34]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
                                              t9, t10, t11, t12, t13, t14),
                                             midrank=True)

        assert_almost_equal(Tk, 3.294, 3)
        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
                                  tm, 4)
        assert_almost_equal(p, 0.0041, 4)

    def test_not_enough_samples(self):
        # A single sample is not enough for a k-sample test.
        assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))

    def test_no_distinct_observations(self):
        # All observations identical across samples is rejected.
        assert_raises(ValueError, stats.anderson_ksamp,
                      (np.ones(5), np.ones(5)))

    def test_empty_sample(self):
        # Any empty sample is rejected.
        assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))

    def test_result_attributes(self):
        # Example data from Scholz & Stephens (1987), originally
        # published in Lehmann (1995, Nonparametrics, Statistical
        # Methods Based on Ranks, p. 309)
        # Pass a mixture of lists and arrays
        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='approximate p-value')
            res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)

        attributes = ('statistic', 'critical_values', 'significance_level')
        check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
    # Tests for stats.binom_test (exact two-sided binomial test).

    def test_data(self):
        assert_almost_equal(stats.binom_test(100, 250),
                            0.0018833009350757682, 11)
        assert_almost_equal(stats.binom_test(201, 405),
                            0.92085205962670713, 11)
        assert_almost_equal(stats.binom_test([682, 243], p=3.0/4),
                            0.38249155957481695, 11)

    def test_bad_len_x(self):
        # Length of x must be 1 or 2.
        assert_raises(ValueError, stats.binom_test, [1, 2, 3])

    def test_bad_n(self):
        # len(x) is 1, but n is invalid.
        # Missing n
        assert_raises(ValueError, stats.binom_test, [100])
        # n less than x[0]
        assert_raises(ValueError, stats.binom_test, [100], n=50)

    def test_bad_p(self):
        # Probability outside [0, 1].
        assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)

    def test_alternatives(self):
        for alternative, expected in [('less', 0.982022657605858),
                                      ('greater', 0.02654424571169085),
                                      ('two-sided', 0.0437479701823997)]:
            pval = stats.binom_test(51, 235, p=1./6,
                                    alternative=alternative)
            assert_almost_equal(pval, expected)
class TestFligner(TestCase):
    # Tests for stats.fligner (Fligner-Killeen test for equal variances).

    def test_data(self):
        # numbers from R: fligner.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.fligner(x1, x1**2),
                                  (3.2282229927203536, 0.072379187848207877),
                                  11)

    def test_trimmed1(self):
        # Test that center='trimmed' gives the same result as center='mean'
        # when proportiontocut=0.
        Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
        Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed',
                                    proportiontocut=0.0)
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    def test_trimmed2(self):
        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
        # Use center='trimmed'
        Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
                                    proportiontocut=0.125)
        # Trim the data here, and use center='mean'
        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
        # Result should be the same.
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    # The following test looks reasonable at first, but fligner() uses the
    # function stats.rankdata(), and in one of the cases in this test,
    # there are ties, while in the other (because of normal rounding
    # errors) there are not.  This difference leads to differences in the
    # third significant digit of W.
    #
    #def test_equal_mean_median(self):
    #    x = np.linspace(-1,1,21)
    #    y = x**3
    #    W1, pval1 = stats.fligner(x, y, center='mean')
    #    W2, pval2 = stats.fligner(x, y, center='median')
    #    assert_almost_equal(W1, W2)
    #    assert_almost_equal(pval1, pval2)

    def test_bad_keyword(self):
        # Misspelled keyword argument raises TypeError.
        x = np.linspace(-1, 1, 21)
        assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)

    def test_bad_center_value(self):
        # Unknown 'center' value raises ValueError.
        x = np.linspace(-1, 1, 21)
        assert_raises(ValueError, stats.fligner, x, x, center='trim')

    def test_bad_num_args(self):
        # Too few args raises ValueError.
        assert_raises(ValueError, stats.fligner, [1])

    def test_empty_arg(self):
        # Any empty sample makes both outputs nan.
        x = np.arange(5)
        assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
    # Tests for stats.mood (Mood's two-sample test for scale parameters).
    # mood() returns (z-score, p-value) and broadcasts over an axis.

    def test_mood(self):
        # numbers from R: mood.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.mood(x1, x1**2),
                                  (-1.3830857299399906, 0.16663858066771478),
                                  11)

    def test_mood_order_of_args(self):
        # z should change sign when the order of arguments changes, pvalue
        # should not change
        np.random.seed(1234)
        x1 = np.random.randn(10, 1)
        x2 = np.random.randn(15, 1)
        z1, p1 = stats.mood(x1, x2)
        z2, p2 = stats.mood(x2, x1)
        assert_array_almost_equal([z1, p1], [-z2, p2])

    def test_mood_with_axis_none(self):
        # Test with axis = None, compare with results from R
        x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
              1.59528080213779, 0.329507771815361, -0.820468384118015,
              0.487429052428485, 0.738324705129217, 0.575781351653492,
              -0.305388387156356, 1.51178116845085, 0.389843236411431,
              -0.621240580541804, -2.2146998871775, 1.12493091814311,
              -0.0449336090152309, -0.0161902630989461, 0.943836210685299,
              0.821221195098089, 0.593901321217509]

        x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
              -1.13037567424629, -0.0802517565509893, 0.132420284381094,
              0.707954729271733, -0.23969802417184, 1.98447393665293,
              -0.138787012119665, 0.417650750792556, 0.981752777463662,
              -0.392695355503813, -1.03966897694891, 1.78222896030858,
              -2.31106908460517, 0.878604580921265, 0.035806718015226,
              1.01282869212708, 0.432265154539617, 2.09081920524915,
              -1.19992581964387, 1.58963820029007, 1.95465164222325,
              0.00493777682814261, -2.45170638784613, 0.477237302613617,
              -0.596558168631403, 0.792203270299649, 0.289636710177348]

        x1 = np.array(x1)
        x2 = np.array(x2)
        # Reshape to 2-D; axis=None must still treat the data as flat.
        x1.shape = (10, 2)
        x2.shape = (15, 2)
        assert_array_almost_equal(stats.mood(x1, x2, axis=None),
                                  [-1.31716607555, 0.18778296257])

    def test_mood_2d(self):
        # Test if the results of mood test in 2-D case are consistent with the
        # R result for the same inputs.  Numbers from R mood.test().
        ny = 5
        np.random.seed(1234)
        x1 = np.random.randn(10, ny)
        x2 = np.random.randn(15, ny)
        z_vectest, pval_vectest = stats.mood(x1, x2)
        for j in range(ny):
            assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
                                      stats.mood(x1[:, j], x2[:, j]))

        # inverse order of dimensions
        x1 = x1.transpose()
        x2 = x2.transpose()
        z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
        for i in range(ny):
            # check axis handling is self consistent
            assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
                                      stats.mood(x1[i, :], x2[i, :]))

    def test_mood_3d(self):
        shape = (10, 5, 6)
        np.random.seed(1234)
        x1 = np.random.randn(*shape)
        x2 = np.random.randn(*shape)

        for axis in range(3):
            z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
            # Tests that result for 3-D arrays is equal to that for the
            # same calculation on a set of 1-D arrays taken from the
            # 3-D array
            axes_idx = ([1, 2], [0, 2], [0, 1])  # the two axes != axis
            for i in range(shape[axes_idx[axis][0]]):
                for j in range(shape[axes_idx[axis][1]]):
                    if axis == 0:
                        slice1 = x1[:, i, j]
                        slice2 = x2[:, i, j]
                    elif axis == 1:
                        slice1 = x1[i, :, j]
                        slice2 = x2[i, :, j]
                    else:
                        slice1 = x1[i, j, :]
                        slice2 = x2[i, j, :]

                    assert_array_almost_equal([z_vectest[i, j],
                                               pval_vectest[i, j]],
                                              stats.mood(slice1, slice2))

    def test_mood_bad_arg(self):
        # Raise ValueError when the sum of the lengths of the args is
        # less than 3
        assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
    # Tests for stats.probplot (probability plot against a theoretical
    # distribution).  probplot() returns (osm, osr) and, with fit=True,
    # additionally (slope, intercept, r).

    def test_basic(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        osm, osr = stats.probplot(x, fit=False)
        osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
                        -0.73908135, -0.5857176, -0.44506467, -0.31273668,
                        -0.18568928, -0.06158146, 0.06158146, 0.18568928,
                        0.31273668, 0.44506467, 0.5857176, 0.73908135,
                        0.91222575, 1.11829229, 1.38768012, 1.8241636]
        # osr is the sorted input; osm are the theoretical quantiles.
        assert_allclose(osr, np.sort(x))
        assert_allclose(osm, osm_expected)

        res, res_fit = stats.probplot(x, fit=True)
        res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
        assert_allclose(res_fit, res_fit_expected)

    def test_sparams_keyword(self):
        np.random.seed(123456)
        x = stats.norm.rvs(size=100)
        # Check that None, () and 0 (loc=0, for normal distribution) all work
        # and give the same results
        osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
        osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
        osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osm1, osm3)
        assert_allclose(osr1, osr2)
        assert_allclose(osr1, osr3)
        # Check giving (loc, scale) params for normal distribution
        osm, osr = stats.probplot(x, sparams=(), fit=False)

    def test_dist_keyword(self):
        np.random.seed(12345)
        x = stats.norm.rvs(size=20)
        # Distribution may be given by name or as a frozen/raw object.
        osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
        osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

        assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
        assert_raises(AttributeError, stats.probplot, x, dist=[])

        class custom_dist(object):
            """Some class that looks just enough like a distribution."""
            def ppf(self, q):
                return stats.norm.ppf(q, loc=2)

        # A duck-typed object with a ppf method is also accepted.
        osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
        osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        np.random.seed(7654321)
        fig = plt.figure()
        fig.add_subplot(111)
        x = stats.t.rvs(3, size=100)
        res1, fitres1 = stats.probplot(x, plot=plt)
        plt.close()
        res2, fitres2 = stats.probplot(x, plot=None)
        res3 = stats.probplot(x, fit=False, plot=plt)
        plt.close()
        res4 = stats.probplot(x, fit=False, plot=None)
        # Check that results are consistent between combinations of `fit` and
        # `plot` keywords.
        assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
        assert_allclose(res1, res2)
        assert_allclose(res1, res3)
        assert_allclose(res1, res4)
        assert_allclose(fitres1, fitres2)

        # Check that a Matplotlib Axes object is accepted
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.probplot(x, fit=False, plot=ax)
        plt.close()

    def test_probplot_bad_args(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")

    def test_empty(self):
        # Empty input returns empty arrays (and a nan fit with fit=True).
        assert_equal(stats.probplot([], fit=False),
                     (np.array([]), np.array([])))
        assert_equal(stats.probplot([], fit=True),
                     ((np.array([]), np.array([])),
                      (np.nan, np.nan, 0.0)))

    def test_array_of_size_one(self):
        # One point cannot be fit; slope/intercept are nan, r is 0.
        with np.errstate(invalid='ignore'):
            assert_equal(stats.probplot([1], fit=True),
                         ((np.array([0.]), np.array([1])),
                          (np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
    # Raise ValueError when two args of different lengths are given or
    # zero_method is unknown.
    with assert_raises(ValueError):
        stats.wilcoxon([1], [1, 2])
    with assert_raises(ValueError):
        stats.wilcoxon([1, 2], [1, 2], "dummy")
def test_wilcoxon_arg_type():
    # Should be able to accept list as arguments.
    # Address issue 6070.
    arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
    # Each supported zero_method must accept a plain Python list.
    for method in ("pratt", "zsplit", "wilcox"):
        stats.wilcoxon(arr, zero_method=method)
class TestKstat(TestCase):
    # Tests for stats.kstat (unbiased k-statistics, cumulant estimators).

    def test_moments_normal_distribution(self):
        np.random.seed(32149)
        data = np.random.randn(12345)
        moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]

        expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
        assert_allclose(moments, expected, rtol=1e-4)

        # test equivalence with `stats.moment`
        central = [stats.moment(data, moment=k) for k in (1, 2, 3)]
        assert_allclose(central, expected[:-1], atol=0.02, rtol=1e-2)

    def test_empty_input(self):
        # An empty data set must be rejected.
        assert_raises(ValueError, stats.kstat, [])

    def test_nan_input(self):
        # A NaN in the input propagates to the result.
        data = np.arange(10.)
        data[6] = np.nan

        assert_equal(stats.kstat(data), np.nan)

    def test_kstat_bad_arg(self):
        # Raise ValueError if n > 4 or n < 1.
        data = np.arange(10)
        for n in [0, 4.001]:
            assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
    # Tests for stats.kstatvar (variance of k-statistics).

    def test_empty_input(self):
        # An empty data set must be rejected.
        assert_raises(ValueError, stats.kstatvar, [])

    def test_nan_input(self):
        # A NaN in the input should propagate to the result.
        data = np.arange(10.)
        data[6] = np.nan

        # Bug fix: this previously called stats.kstat (copy-pasted from
        # TestKstat.test_nan_input), so kstatvar's NaN handling was never
        # actually exercised by this test.
        assert_equal(stats.kstatvar(data), np.nan)

    def test_bad_arg(self):
        # Raise ValueError is n is not 1 or 2.
        data = [1]
        n = 10
        assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
    # Tests for stats.ppcc_plot (probability plot correlation coefficient
    # against shape parameter).

    def setUp(self):
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        N = 5
        svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
        ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
                         0.93519298]
        assert_allclose(svals, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    def test_dist(self):
        # Test that we can specify distributions both by name and as objects.
        svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
        svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
                                        dist=stats.tukeylambda)
        assert_allclose(svals1, svals2, rtol=1e-20)
        assert_allclose(ppcc1, ppcc2, rtol=1e-20)
        # Test that 'tukeylambda' is the default dist
        svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
        assert_allclose(svals1, svals3, rtol=1e-20)
        assert_allclose(ppcc1, ppcc3, rtol=1e-20)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=plt)
        plt.close()

        # Check that a Matplotlib Axes object is accepted.  Use a fresh
        # figure: the previous one was closed, and the original code
        # redundantly called fig.add_subplot(111) twice in a row here.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `b` has to be larger than `a`
        assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)

        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
                      dist="plate_of_shrimp")

    def test_empty(self):
        # For consistency with probplot return for one empty array,
        # ppcc contains all zeros and svals is the same as for normal array
        # input.
        svals, ppcc = stats.ppcc_plot([], 0, 1)
        assert_allclose(svals, np.linspace(0, 1, num=80))
        assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
    # Tests for stats.ppcc_max (shape parameter maximizing the probability
    # plot correlation coefficient).

    def test_ppcc_max_bad_arg(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.ppcc_max, [1],
                      dist="plate_of_shrimp")

    def test_ppcc_max_basic(self):
        np.random.seed(1234567)
        data = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5,
                                     size=10000) + 1e4
        # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
        # it is accurate up to 16 decimals
        assert_almost_equal(stats.ppcc_max(data), -0.71215366521264145,
                            decimal=5)

    def test_dist(self):
        np.random.seed(1234567)
        data = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5,
                                     size=10000) + 1e4

        # Distributions may be given by name or as objects; both must agree.
        for dist in ('tukeylambda', stats.tukeylambda):
            assert_almost_equal(stats.ppcc_max(data, dist=dist),
                                -0.71215366521264145, decimal=5)

        # 'tukeylambda' is also the default dist.
        assert_almost_equal(stats.ppcc_max(data),
                            -0.71215366521264145, decimal=5)

    def test_brack(self):
        np.random.seed(1234567)
        data = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5,
                                     size=10000) + 1e4
        # A 3-tuple bracket is invalid.
        assert_raises(ValueError, stats.ppcc_max, data,
                      brack=(0.0, 1.0, 0.5))

        # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
        # it is accurate up to 16 decimals
        assert_almost_equal(stats.ppcc_max(data, brack=(0, 1)),
                            -0.71215366521264145, decimal=5)
        assert_almost_equal(stats.ppcc_max(data, brack=(-2, 2)),
                            -0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
    # Tests for stats.boxcox_llf (Box-Cox log-likelihood function).

    def test_basic(self):
        np.random.seed(54321)
        sample = stats.norm.rvs(size=10000, loc=10)
        llf = stats.boxcox_llf(1, sample)
        # For lmbda=1 the transform is affine, so the log-likelihood has
        # this closed form in terms of the sample variance.
        llf_expected = -sample.size / 2. * np.log(np.sum(sample.std()**2))
        assert_allclose(llf, llf_expected)

    def test_array_like(self):
        # A plain list must give the same result as an ndarray.
        np.random.seed(54321)
        sample = stats.norm.rvs(size=100, loc=10)
        llf_array = stats.boxcox_llf(1, sample)
        llf_list = stats.boxcox_llf(1, list(sample))
        assert_allclose(llf_array, llf_list, rtol=1e-12)

    def test_2d_input(self):
        # Note: boxcox_llf() was already working with 2-D input (sort of), so
        # keep it like that.  boxcox() doesn't work with 2-D input though, due
        # to brent() returning a scalar.
        np.random.seed(54321)
        sample = stats.norm.rvs(size=100, loc=10)
        llf = stats.boxcox_llf(1, sample)
        llf2 = stats.boxcox_llf(1, np.vstack([sample, sample]).T)
        assert_allclose([llf, llf], llf2, rtol=1e-12)

    def test_empty(self):
        # Empty input yields nan rather than raising.
        assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):

    def setUp(self):
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        num_points = 5
        lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=num_points)
        expected_ppcc = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
                         0.95843297]
        assert_allclose(lmbdas, np.linspace(-10, 10, num=num_points))
        assert_allclose(ppcc, expected_ppcc)

    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Passing the matplotlib.pyplot module itself must work.
        fig = plt.figure()
        fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=plt)
        plt.close()

        # A Matplotlib Axes object must be accepted too.
        fig.add_subplot(111)
        ax = fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `lb` has to be larger than `la`.
        assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
        # `x` can not contain negative values.
        assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)

    def test_empty(self):
        assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    res = stats.wilcoxon(x, y, correction=False)
    # The result must expose its values by name as well as by position.
    check_named_results(res, ('statistic', 'pvalue'))
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):

    def test_bad_n_samples(self):
        # median_test requires at least two samples.
        assert_raises(ValueError, stats.median_test, [1, 2, 3])

    def test_empty_sample(self):
        # Each sample must contain at least one value.
        assert_raises(ValueError, stats.median_test, [], [1, 2, 3])

    def test_empty_when_ties_ignored(self):
        # The grand median is 1, and every value in the first sample equals
        # it.  With ties="ignore" those values are dropped, leaving the first
        # sample effectively empty, which must raise a ValueError.
        assert_raises(ValueError, stats.median_test,
                      [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")

    def test_empty_contingency_row(self):
        # The grand median is 1; with the default ties="below" every value
        # counts as below it, producing an all-zero contingency row, which is
        # an error.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
        # With ties="above" everything is counted as above instead.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
                      ties="above")

    def test_bad_ties(self):
        assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
                      ties="foo")

    def test_bad_nan_policy(self):
        assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
                      nan_policy='foobar')

    def test_bad_keyword(self):
        assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
                      foo="foo")

    def test_simple(self):
        sample_a = [1, 2, 3]
        sample_b = [1, 2, 3]
        stat, p, med, tbl = stats.median_test(sample_a, sample_b)

        # The median is floating point, but this equality test should be safe.
        assert_equal(med, 2.0)

        assert_array_equal(tbl, [[1, 1], [2, 2]])

        # The expected values of the contingency table equal the contingency
        # table, so the statistic should be 0 and the p-value should be 1.
        assert_equal(stat, 0)
        assert_equal(p, 1)

    def test_ties_options(self):
        # Check the contingency table for each `ties` option; the grand
        # median of the pooled data is 5.
        x = [1, 2, 3, 4]
        y = [5, 6]
        z = [7, 8, 9]
        expected_tables = {"below": [[0, 1, 3], [4, 1, 0]],
                           "ignore": [[0, 1, 3], [4, 0, 0]],
                           "above": [[0, 2, 3], [4, 0, 0]]}

        # The default 'ties' option is "below".
        stat, p, m, tbl = stats.median_test(x, y, z)
        assert_equal(m, 5)
        assert_equal(tbl, expected_tables["below"])

        for ties in ("ignore", "above"):
            stat, p, m, tbl = stats.median_test(x, y, z, ties=ties)
            assert_equal(m, 5)
            assert_equal(tbl, expected_tables[ties])

    def test_nan_policy_options(self):
        x = [1, 2, np.nan]
        y = [4, 5, 6]
        mt1 = stats.median_test(x, y, nan_policy='propagate')
        s, p, m, t = stats.median_test(x, y, nan_policy='omit')

        assert_equal(mt1, (np.nan, np.nan, np.nan, None))
        assert_allclose(s, 0.31250000000000006)
        assert_allclose(p, 0.57615012203057869)
        assert_equal(m, 4.0)
        assert_equal(t, np.array([[0, 2], [2, 1]]))
        assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')

    def test_basic(self):
        # median_test calls chi2_contingency to compute the test statistic
        # and p-value.  Make sure the keyword arguments are forwarded
        # correctly by comparing against a direct chi2_contingency call.
        x = [1, 2, 3, 4, 5]
        y = [2, 4, 6, 8]
        for kwargs in ({}, {'lambda_': 0}, {'correction': False}):
            stat, p, m, tbl = stats.median_test(x, y, **kwargs)
            assert_equal(m, 4)
            assert_equal(tbl, [[1, 2], [4, 2]])
            exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, **kwargs)
            assert_allclose(stat, exp_stat)
            assert_allclose(p, exp_p)
if __name__ == "__main__":
    # Run all tests in this module when executed as a script.
    run_module_suite()
| mit |
scienceopen/gaussfitter | ah_bootstrap.py | 31 | 36162 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
# ConfigParser was renamed to configparser in Python 3.
try:
    from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
    from configparser import ConfigParser, RawConfigParser


# Python 2/3 string-type compatibility shims used throughout this module.
if sys.version_info[0] < 3:
    _str_types = (str, unicode)
    _text_type = unicode
    PY3 = False
else:
    _str_types = (str, bytes)
    _text_type = str
    PY3 = True


# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):


# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
    import pkg_resources
    _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
    # This may raise a DistributionNotFound in which case no version of
    # setuptools or distribute is properly installed
    _setuptools = pkg_resources.get_distribution('setuptools')
    if _setuptools not in _setuptools_req:
        # Older version of setuptools; check if we have distribute; again if
        # this results in DistributionNotFound we want to give up
        _distribute = pkg_resources.get_distribution('distribute')
        if _setuptools != _distribute:
            # It's possible on some pathological systems to have an old version
            # of setuptools and distribute on sys.path simultaneously; make
            # sure distribute is the one that's used
            sys.path.insert(1, _distribute.location)
            _distribute.activate()
            imp.reload(pkg_resources)
except:
    # There are several types of exceptions that can occur here; if all else
    # fails bootstrap and use the bootstrapped version
    from ez_setup import use_setuptools
    use_setuptools()


# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
    import setuptools.py31compat
except ImportError:
    pass


# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation.  See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason*
    pass


# End compatibility imports...


# In case it didn't successfully import before the ez_setup checks
import pkg_resources

from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup

from distutils import log
from distutils.debug import DEBUG


# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'

# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True

# A list of all the configuration options and their required types
CFG_OPTIONS = [
    ('auto_use', bool), ('path', str), ('download_if_needed', bool),
    ('index_url', str), ('use_git', bool), ('offline', bool),
    ('auto_upgrade', bool)
]
class _Bootstrapper(object):
    """
    Bootstrapper implementation.  See ``use_astropy_helpers`` for parameter
    documentation.
    """

    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        if path is None:
            path = PACKAGE_NAME

        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')

        if PY3 and not isinstance(path, _text_type):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding)  # path to unicode

        self.path = path

        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE

        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False

        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)

        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False

        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False

    @classmethod
    def main(cls, argv=None):
        """Entry point: build a bootstrapper from setup.cfg + command line."""
        if argv is None:
            argv = sys.argv

        config = cls.parse_config()
        config.update(cls.parse_command_line(argv))

        auto_use = config.pop('auto_use', False)
        bootstrapper = cls(**config)

        if auto_use:
            # Run the bootstrapper, otherwise the setup.py is using the old
            # use_astropy_helpers() interface, in which case it will run the
            # bootstrapper manually after reconfiguring it.
            bootstrapper.run()

        return bootstrapper

    @classmethod
    def parse_config(cls):
        """Read the ``[ah_bootstrap]`` section of setup.cfg, if present."""
        if not os.path.exists('setup.cfg'):
            return {}

        cfg = ConfigParser()

        try:
            cfg.read('setup.cfg')
        except Exception as e:
            if DEBUG:
                raise

            log.error(
                "Error reading setup.cfg: {0!r}\n{1} will not be "
                "automatically bootstrapped and package installation may fail."
                "\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
            return {}

        if not cfg.has_section('ah_bootstrap'):
            return {}

        config = {}

        for option, type_ in CFG_OPTIONS:
            if not cfg.has_option('ah_bootstrap', option):
                continue

            if type_ is bool:
                value = cfg.getboolean('ah_bootstrap', option)
            else:
                value = cfg.get('ah_bootstrap', option)

            config[option] = value

        return config

    @classmethod
    def parse_command_line(cls, argv=None):
        """Pop the options this module recognizes out of ``argv``."""
        if argv is None:
            argv = sys.argv

        config = {}

        # For now we just pop recognized ah_bootstrap options out of the
        # arg list.  This is imperfect; in the unlikely case that a setup.py
        # custom command or even custom Distribution class defines an argument
        # of the same name then we will break that.  However there's a catch22
        # here that we can't just do full argument parsing right here, because
        # we don't yet know *how* to parse all possible command-line arguments.
        if '--no-git' in argv:
            config['use_git'] = False
            argv.remove('--no-git')

        if '--offline' in argv:
            config['offline'] = True
            argv.remove('--offline')

        return config

    def run(self):
        """Locate (or download) astropy_helpers and activate it on sys.path."""
        strategies = ['local_directory', 'local_file', 'index']
        dist = None

        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue

        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()

        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))

        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST".  However, in other cases it gets a precedence of
        # "EGG_DIST".  However, when activing the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)

        # Otherwise we found a version of astropy-helpers, so we're done
        # Just active the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here.  These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)

    @property
    def config(self):
        """
        A `dict` containing the options this `_Bootstrapper` was configured
        with.
        """

        return dict((optname, getattr(self, optname))
                    for optname, _ in CFG_OPTIONS if hasattr(self, optname))

    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.
        """

        if not os.path.isdir(self.path):
            return

        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))

        dist = self._directory_import()

        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade

        return dist

    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.
        """

        if not os.path.isfile(self.path):
            return

        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))

        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise

            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None

        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade

        return dist

    def get_index_dist(self):
        """Download astropy-helpers from the package index, if allowed."""
        if not self.download:
            log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
            return None

        log.warn(
            "Downloading {0!r}; run setup.py with the --offline option to "
            "force offline installation.".format(DIST_NAME))

        try:
            dist = self._do_download()
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to download and/or install {0!r} from {1!r}:\n'
                '{2}'.format(DIST_NAME, self.index_url, str(e)))
            dist = None

        # No need to run auto-upgrade here since we've already presumably
        # gotten the most up-to-date version from the package index
        return dist

    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.

        Returns the ``pkg_resources.Distribution`` for the package on
        success, or `None` if no distribution could be found at the path.
        (The original docstring claimed a True/False return, which did not
        match the implementation.)
        """

        # Return the distribution on success, None on failure but when
        # download is allowed, and otherwise raise SystemExit
        path = os.path.abspath(self.path)

        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)

        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                with _silence():
                    run_setup(os.path.join(path, 'setup.py'),
                              ['egg_info'])

                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist

        return dist

    def _do_download(self, version='', find_links=None):
        """Install astropy-helpers via setup_requires and return its dist."""
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url

        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    if allow_hosts is not None:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts

        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            req = DIST_NAME

        attrs = {'setup_requires': [req]}

        try:
            if DEBUG:
                _Distribution(attrs=attrs)
            else:
                with _silence():
                    _Distribution(attrs=attrs)

            # If the setup_requires succeeded it will have added the new dist
            # to the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise

            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'

            raise Exception(msg.format(DIST_NAME, source, repr(e)))

    def _do_upgrade(self, dist):
        """Check PyPI for a bugfix release newer than ``dist`` and fetch it."""
        # Build up a requirement for a higher bugfix release but a lower minor
        # release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)

        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))

        package_index = PackageIndex(index_url=self.index_url)

        upgrade = package_index.obtain(req)

        if upgrade is not None:
            return self._do_download(version=upgrade.version)

    def _check_submodule(self):
        """
        Check if the given path is a git submodule.

        See the docstrings for ``_check_submodule_using_git`` and
        ``_check_submodule_no_git`` for further details.
        """

        if (self.path is None or
                (os.path.exists(self.path) and not os.path.isdir(self.path))):
            return False

        if self.use_git:
            return self._check_submodule_using_git()
        else:
            return self._check_submodule_no_git()

    def _check_submodule_using_git(self):
        """
        Check if the given path is a git submodule.  If so, attempt to
        initialize and/or update the submodule if needed.

        This function makes calls to the ``git`` command in subprocesses.  The
        ``_check_submodule_no_git`` option uses pure Python to check if the
        given path looks like a git submodule, but it cannot perform updates.
        """

        cmd = ['git', 'submodule', 'status', '--', self.path]

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except _CommandNotFound:
            # The git command simply wasn't found; this is most likely the
            # case on user systems that don't have git and are simply
            # trying to install the package from PyPI or a source
            # distribution.  Silently ignore this case and simply don't try
            # to use submodules
            return False

        stderr = stderr.strip()

        if returncode != 0 and stderr:
            # Unfortunately the return code alone cannot be relied on, as
            # earlier versions of git returned 0 even if the requested submodule
            # does not exist

            # This is a warning that occurs in perl (from running git submodule)
            # which only occurs with a malformatted locale setting which can
            # happen sometimes on OSX.  See again
            # https://github.com/astropy/astropy/issues/2749
            perl_warning = ('perl: warning: Falling back to the standard locale '
                            '("C").')
            if not stderr.strip().endswith(perl_warning):
                # Some other unknown error condition occurred
                log.warn('git submodule command failed '
                         'unexpectedly:\n{0}'.format(stderr))
                return False

        # Output of `git submodule status` is as follows:
        #
        # 1: Status indicator: '-' for submodule is uninitialized, '+' if
        # submodule is initialized but is not at the commit currently indicated
        # in .gitmodules (and thus needs to be updated), or 'U' if the
        # submodule is in an unstable state (i.e. has merge conflicts)
        #
        # 2. SHA-1 hash of the current commit of the submodule (we don't really
        # need this information but it's useful for checking that the output is
        # correct)
        #
        # 3. The output of `git describe` for the submodule's current commit
        # hash (this includes for example what branches the commit is on) but
        # only if the submodule is initialized.  We ignore this information for
        # now
        #
        # Note: raw strings avoid DeprecationWarnings for the escapes, and
        # '-' is placed first in the character class so it is matched
        # literally; the original '[+-U ]' defined an accidental character
        # *range* from '+' to 'U'.
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[-+U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')

        # The stdout should only contain one line--the status of the
        # requested submodule
        m = _git_submodule_status_re.match(stdout)
        if m:
            # Yes, the path *is* a git submodule
            self._update_submodule(m.group('submodule'), m.group('status'))
            return True
        else:
            log.warn(
                'Unexpected output from `git submodule status`:\n{0}\n'
                'Will attempt import from {1!r} regardless.'.format(
                    stdout, self.path))
            return False

    def _check_submodule_no_git(self):
        """
        Like ``_check_submodule_using_git``, but simply parses the .gitmodules
        file to determine if the supplied path is a git submodule, and does not
        exec any subprocesses.

        This can only determine if a path is a submodule--it does not perform
        updates, etc.  This function may need to be updated if the format of
        the .gitmodules file is changed between git versions.
        """

        gitmodules_path = os.path.abspath('.gitmodules')

        if not os.path.isfile(gitmodules_path):
            return False

        # This is a minimal reader for gitconfig-style files.  It handles a few
        # of the quirks that make gitconfig files incompatible with
        # ConfigParser-style files, but does not support the full gitconfig
        # syntax (just enough needed to read a .gitmodules file).
        gitmodules_fileobj = io.StringIO()

        # Must use io.open for cross-Python-compatible behavior wrt unicode
        with io.open(gitmodules_path) as f:
            for line in f:
                # gitconfig files are more flexible with leading whitespace;
                # just go ahead and remove it
                line = line.lstrip()

                # comments can start with either # or ;
                # (bugfix: the original tested for ':' instead of '#', so
                # '#'-style comments were passed through to the parser)
                if line and line[0] in ('#', ';'):
                    continue

                gitmodules_fileobj.write(line)

        gitmodules_fileobj.seek(0)

        cfg = RawConfigParser()

        try:
            cfg.readfp(gitmodules_fileobj)
        except Exception as exc:
            log.warn('Malformatted .gitmodules file: {0}\n'
                     '{1} cannot be assumed to be a git submodule.'.format(
                         exc, self.path))
            return False

        for section in cfg.sections():
            if not cfg.has_option(section, 'path'):
                continue

            submodule_path = cfg.get(section, 'path').rstrip(os.sep)

            if submodule_path == self.path.rstrip(os.sep):
                return True

        return False

    def _update_submodule(self, submodule, status):
        """Initialize or update ``submodule`` based on its git status flag."""
        if status == ' ':
            # The submodule is up to date; no action necessary
            return
        elif status == '-':
            if self.offline:
                raise _AHBootstrapSystemExit(
                    "Cannot initialize the {0} submodule in --offline mode; "
                    "this requires being able to clone the submodule from an "
                    "online repository.".format(submodule))
            cmd = ['update', '--init']
            action = 'Initializing'
        elif status == '+':
            cmd = ['update']
            action = 'Updating'
            if self.offline:
                cmd.append('--no-fetch')
        elif status == 'U':
            # Bugfix: was the misspelled name '_AHBoostrapSystemExit', which
            # would raise a NameError instead of the intended exception.
            raise _AHBootstrapSystemExit(
                'Error: Submodule {0} contains unresolved merge conflicts.  '
                'Please complete or abandon any changes in the submodule so that '
                'it is in a usable state, then try again.'.format(submodule))
        else:
            log.warn('Unknown status {0!r} for git submodule {1!r}.  Will '
                     'attempt to use the submodule as-is, but try to ensure '
                     'that the submodule is in a clean state and contains no '
                     'conflicts or errors.\n{2}'.format(status, submodule,
                                                        _err_help_msg))
            return

        err_msg = None

        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
        log.warn('{0} {1} submodule with: `{2}`'.format(
            action, submodule, ' '.join(cmd)))

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except OSError as e:
            err_msg = str(e)
        else:
            if returncode != 0:
                err_msg = stderr

        if err_msg is not None:
            log.warn('An unexpected error occurred updating the git submodule '
                     '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
                                               _err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line
    arguments.

    Returns a ``(returncode, stdout, stderr)`` tuple, with ``stdout`` and
    ``stderr`` decoded to text.

    Raises
    ------
    _CommandNotFound
        If the executable does not exist.
    _AHBootstrapSystemExit
        For any other ``OSError`` raised while spawning the process
        (unless ``DEBUG`` is set, in which case the original error
        propagates).
    """
    try:
        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = p.communicate()
    except OSError as e:
        if DEBUG:
            raise

        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            # BUG FIX: previously raised the misspelled `_AHBoostrapSystemExit`,
            # which would fail with a NameError instead of the intended exit.
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))

    # Can fail if the default locale is not configured properly.  See
    # https://github.com/astropy/astropy/issues/2749.  For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings.  See:
        # http://bugs.python.org/issue18378
        stdio_encoding = 'latin1'

    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, _text_type):
        stdout = stdout.decode(stdio_encoding, 'replace')

    if not isinstance(stderr, _text_type):
        stderr = stderr.decode(stdio_encoding, 'replace')

    return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr."""

    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Restore the real streams whether or not the body raised.  The
        # original duplicated this cleanup across an except-and-flag dance;
        # try/finally expresses the same behavior directly and cannot miss
        # a restore path.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
# Generic troubleshooting advice appended to several error and log messages
# in this module (see run_cmd, _update_submodule, _AHBootstrapSystemExit).
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
    """``SystemExit`` subclass that appends the module's generic
    troubleshooting advice to whatever message it is given.
    """

    def __init__(self, *args):
        if args:
            msg = args[0]
        else:
            msg = 'An unknown problem occurred bootstrapping astropy_helpers.'

        msg += '\n' + _err_help_msg

        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
    # In Python 2.6 the distutils log does not log warnings, errors, etc. to
    # stderr so we have to wrap it to ensure consistency at least in this
    # module
    import distutils

    class log(object):
        def __getattr__(self, attr):
            # Fall through to the real distutils log for anything not
            # overridden here.
            return getattr(distutils.log, attr)

        def warn(self, msg, *args):
            self._log_to_stderr(distutils.log.WARN, msg, *args)

        def error(self, msg, *args):
            # BUG FIX: the signature previously omitted *args even though the
            # body forwarded them, so any call raised a NameError.
            self._log_to_stderr(distutils.log.ERROR, msg, *args)

        def fatal(self, msg, *args):
            # BUG FIX: same missing *args as `error` above.
            self._log_to_stderr(distutils.log.FATAL, msg, *args)

        def log(self, level, msg, *args):
            if level in (distutils.log.WARN, distutils.log.ERROR,
                         distutils.log.FATAL):
                self._log_to_stderr(level, msg, *args)
            else:
                distutils.log.log(level, msg, *args)

        def _log_to_stderr(self, level, msg, *args):
            # This is the only truly 'public' way to get the current threshold
            # of the log
            current_threshold = distutils.log.set_threshold(distutils.log.WARN)
            distutils.log.set_threshold(current_threshold)
            if level >= current_threshold:
                if args:
                    msg = msg % args
                sys.stderr.write('%s\n' % msg)
                sys.stderr.flush()

    # Replace the class with a singleton instance mimicking the module API.
    log = log()
# Run the bootstrapper once at import time with configuration gathered by
# _Bootstrapper.main(); use_astropy_helpers() may later rebuild and re-run it
# with updated options.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------

    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made
        to download astropy_helpers from PyPI.  It will then be made
        temporarily available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline``
        option is given at the command line the value of this argument is
        overridden to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality.  If the ``--offline`` option is given at the command
        line the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.
    """

    global BOOTSTRAPPER

    # Start from the configuration of the import-time bootstrapper and layer
    # the caller's keyword overrides on top of it.
    config = BOOTSTRAPPER.config
    config.update(**kwargs)

    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
| bsd-3-clause |
erikrose/more-itertools | more_itertools/more.py | 1 | 96023 | import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import partial, wraps
from heapq import merge, heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from math import exp, floor, log
from random import random, randrange, uniform
from operator import itemgetter, sub, gt, lt
from sys import maxsize
from time import monotonic
from .recipes import consume, flatten, powerset, take, unique_everseen
# Explicit public API of this module; controls `from more_itertools import *`
# and documents every exported name.
__all__ = [
    'adjacent',
    'always_iterable',
    'always_reversible',
    'bucket',
    'chunked',
    'circular_shifts',
    'collapse',
    'collate',
    'consecutive_groups',
    'consumer',
    'count_cycle',
    'difference',
    'distinct_combinations',
    'distinct_permutations',
    'distribute',
    'divide',
    'exactly_n',
    'filter_except',
    'first',
    'groupby_transform',
    'ilen',
    'interleave_longest',
    'interleave',
    'intersperse',
    'islice_extended',
    'iterate',
    'ichunked',
    'last',
    'locate',
    'lstrip',
    'make_decorator',
    'map_except',
    'map_reduce',
    'nth_or_last',
    'numeric_range',
    'one',
    'only',
    'padded',
    'partitions',
    'set_partitions',
    'peekable',
    'repeat_last',
    'replace',
    'rlocate',
    'rstrip',
    'run_length',
    'sample',
    'seekable',
    'SequenceView',
    'side_effect',
    'sliced',
    'sort_together',
    'split_at',
    'split_after',
    'split_before',
    'split_when',
    'split_into',
    'spy',
    'stagger',
    'strip',
    'substrings',
    'substrings_indexes',
    'time_limited',
    'unique_to_each',
    'unzip',
    'windowed',
    'with_iter',
    'zip_offset',
]
# Module-private sentinel used to tell "no argument supplied" apart from a
# caller explicitly passing None (e.g. as a `default=` value).
_marker = object()
def chunked(iterable, n):
    """Break *iterable* into lists of length *n*:

    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
    [[1, 2, 3], [4, 5, 6]]

    If the length of *iterable* is not evenly divisible by *n*, the last
    returned list will be shorter:

    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[1, 2, 3], [4, 5, 6], [7, 8]]

    To use a fill-in value instead, see the :func:`grouper` recipe.

    :func:`chunked` is useful for splitting up a computation on a large number
    of keys into batches, to be pickled and sent off to worker processes. One
    example is operations on rows in MySQL, which does not implement
    server-side cursors properly and would otherwise load the entire dataset
    into RAM on the client.
    """
    source = iter(iterable)
    # Repeatedly pull fixed-size prefixes; an empty list marks exhaustion
    # (this is exactly what the `take` recipe does, written inline).
    return iter(lambda: list(islice(source, n)), [])
def first(iterable, default=_marker):
    """Return the first item of *iterable*, or *default* if *iterable* is
    empty.

    >>> first([0, 1, 2, 3])
    0
    >>> first([], 'some default')
    'some default'

    If *default* is not provided and there are no items in the iterable,
    raise ``ValueError``.

    :func:`first` is useful when you have a generator of expensive-to-retrieve
    values and want any arbitrary one. It is marginally shorter than
    ``next(iter(iterable), default)``.
    """
    # A for-loop that returns immediately consumes exactly one item, the
    # same as next(iter(iterable)) wrapped in a StopIteration handler.
    for item in iterable:
        return item

    # The iterable was empty.  ValueError (not StopIteration) is raised so
    # callers get a conventional exception for flow control instead of
    # having to catch StopIteration explicitly.
    if default is _marker:
        raise ValueError(
            'first() was called on an empty iterable, and no '
            'default value was provided.'
        )
    return default
def last(iterable, default=_marker):
    """Return the last item of *iterable*, or *default* if *iterable* is
    empty.

    >>> last([0, 1, 2, 3])
    3
    >>> last([], 'some default')
    'some default'

    If *default* is not provided and there are no items in the iterable,
    raise ``ValueError``.
    """
    try:
        # Sequences (anything supporting negative indexing) answer in O(1).
        return iterable[-1]
    except (TypeError, AttributeError, KeyError):
        # Not indexable that way; exhaust the iterable instead.  A
        # maxlen-1 deque retains only the final item.
        try:
            return deque(iterable, maxlen=1)[0]
        except IndexError:
            pass
    except IndexError:
        pass

    # Both strategies raise IndexError only when the iterable is empty.
    if default is _marker:
        raise ValueError(
            'last() was called on an empty iterable, and no '
            'default value was provided.'
        )
    return default
def nth_or_last(iterable, n, default=_marker):
    """Return the nth or the last item of *iterable*,
    or *default* if *iterable* is empty.

    >>> nth_or_last([0, 1, 2, 3], 2)
    2
    >>> nth_or_last([0, 1], 2)
    1
    >>> nth_or_last([], 0, 'some default')
    'some default'

    If *default* is not provided and there are no items in the iterable,
    raise ``ValueError``.
    """
    # Truncate to at most n + 1 items, then take whichever comes last.
    truncated = islice(iterable, n + 1)
    return last(truncated, default=default)
class peekable:
    """Wrap an iterator to allow lookahead and prepending elements.

    Call :meth:`peek` on the result to get the value that will be returned
    by :func:`next`. This won't advance the iterator:

    >>> p = peekable(['a', 'b'])
    >>> p.peek()
    'a'
    >>> next(p)
    'a'

    Pass :meth:`peek` a default value to return that instead of raising
    ``StopIteration`` when the iterator is exhausted.

    >>> p = peekable([])
    >>> p.peek('hi')
    'hi'

    peekables also offer a :meth:`prepend` method, which "inserts" items
    at the head of the iterable:

    >>> p = peekable([1, 2, 3])
    >>> p.prepend(10, 11, 12)
    >>> next(p)
    10
    >>> p.peek()
    11
    >>> list(p)
    [11, 12, 1, 2, 3]

    peekables can be indexed. Index 0 is the item that will be returned by
    :func:`next`, index 1 is the item after that, and so on:
    The values up to the given index will be cached.

    >>> p = peekable(['a', 'b', 'c', 'd'])
    >>> p[0]
    'a'
    >>> p[1]
    'b'
    >>> next(p)
    'a'

    Negative indexes are supported, but be aware that they will cache the
    remaining items in the source iterator, which may require significant
    storage.

    To check whether a peekable is exhausted, check its truth value:

    >>> p = peekable(['a', 'b'])
    >>> if p:  # peekable has items
    ...     list(p)
    ['a', 'b']
    >>> if not p:  # peekable is exhaused
    ...     list(p)
    []

    """

    def __init__(self, iterable):
        # The underlying iterator, plus a FIFO cache holding items that were
        # pulled ahead (by peek/indexing) or prepended but not yet consumed.
        self._it = iter(iterable)
        self._cache = deque()

    def __iter__(self):
        return self

    def __bool__(self):
        # True iff at least one more item is available (peek caches it).
        try:
            self.peek()
        except StopIteration:
            return False
        return True

    def peek(self, default=_marker):
        """Return the item that will be next returned from ``next()``.

        Return ``default`` if there are no items left. If ``default`` is not
        provided, raise ``StopIteration``.
        """
        if not self._cache:
            try:
                self._cache.append(next(self._it))
            except StopIteration:
                if default is _marker:
                    raise
                return default
        return self._cache[0]

    def prepend(self, *items):
        """Stack up items to be the next ones returned from ``next()`` or
        ``self.peek()``. The items will be returned in
        first in, first out order::

            >>> p = peekable([1, 2, 3])
            >>> p.prepend(10, 11, 12)
            >>> next(p)
            10
            >>> list(p)
            [11, 12, 1, 2, 3]

        It is possible, by prepending items, to "resurrect" a peekable that
        previously raised ``StopIteration``.

            >>> p = peekable([])
            >>> next(p)
            Traceback (most recent call last):
              ...
            StopIteration
            >>> p.prepend(1)
            >>> next(p)
            1
            >>> next(p)
            Traceback (most recent call last):
              ...
            StopIteration

        """
        # extendleft reverses its argument, so reverse first to keep the
        # given order at the front of the cache.
        self._cache.extendleft(reversed(items))

    def __next__(self):
        # Serve cached (peeked/prepended) items before touching the source.
        if self._cache:
            return self._cache.popleft()

        return next(self._it)

    def _get_slice(self, index):
        # Normalize the slice's arguments
        step = 1 if (index.step is None) else index.step
        if step > 0:
            start = 0 if (index.start is None) else index.start
            stop = maxsize if (index.stop is None) else index.stop
        elif step < 0:
            start = -1 if (index.start is None) else index.start
            stop = (-maxsize - 1) if (index.stop is None) else index.stop
        else:
            raise ValueError('slice step cannot be zero')

        # If either the start or stop index is negative, we'll need to cache
        # the rest of the iterable in order to slice from the right side.
        if (start < 0) or (stop < 0):
            self._cache.extend(self._it)
        # Otherwise we'll need to find the rightmost index and cache to that
        # point.
        else:
            n = min(max(start, stop) + 1, maxsize)
            cache_len = len(self._cache)
            if n >= cache_len:
                self._cache.extend(islice(self._it, n - cache_len))

        # Delegate the actual slicing semantics to list.
        return list(self._cache)[index]

    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._get_slice(index)

        cache_len = len(self._cache)
        if index < 0:
            # Negative index: everything remaining must be cached.
            self._cache.extend(self._it)
        elif index >= cache_len:
            # Cache just enough items to reach the requested index.
            self._cache.extend(islice(self._it, index + 1 - cache_len))

        return self._cache[index]
def collate(*iterables, **kwargs):
    """Return a sorted merge of the items from each of several already-sorted
    *iterables*.

    >>> list(collate('ACDZ', 'AZ', 'JKL'))
    ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']

    Works lazily, keeping only the next value from each iterable in memory. Use
    :func:`collate` to, for example, perform a n-way mergesort of items that
    don't fit in memory.

    If a *key* function is specified, the iterables will be sorted according
    to its result:

    >>> key = lambda s: int(s)  # Sort by numeric value, not by string
    >>> list(collate(['1', '10'], ['2', '11'], key=key))
    ['1', '2', '10', '11']

    If the *iterables* are sorted in descending order, set *reverse* to
    ``True``:

    >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
    [5, 4, 3, 2, 1, 0]

    If the elements of the passed-in iterables are out of order, you might get
    unexpected results.

    On Python 3.5+, this function is an alias for :func:`heapq.merge`.
    """
    # Deprecated thin wrapper: emit the warning, then hand off to the
    # standard library's merge.
    message = "collate is no longer part of more_itertools, use heapq.merge"
    warnings.warn(message, DeprecationWarning)
    return merge(*iterables, **kwargs)
def consumer(func):
    """Decorator that automatically advances a PEP-342-style "reverse iterator"
    to its first yield point so you don't have to call ``next()`` on it
    manually.

    >>> @consumer
    ... def tally():
    ...     i = 0
    ...     while True:
    ...         print('Thing number %s is %s.' % (i, (yield)))
    ...         i += 1
    ...
    >>> t = tally()
    >>> t.send('red')
    Thing number 0 is red.
    >>> t.send('fish')
    Thing number 1 is fish.

    Without the decorator, you would have to call ``next(t)`` before
    ``t.send()`` could be used.
    """

    @wraps(func)
    def primed(*args, **kwargs):
        generator = func(*args, **kwargs)
        # Advance to the first yield so .send() works immediately.
        next(generator)
        return generator

    return primed
def ilen(iterable):
    """Return the number of items in *iterable*.

    >>> ilen(x for x in range(1000000) if x % 3 == 0)
    333334

    This consumes the iterable, so handle with care.
    """
    # Pair every item with an increasing counter and discard the pairs at
    # C speed; the counter's next value is then the item count.  Benchmarks
    # showed this to be among the fastest known approaches (see GitHub
    # issues #236 and #230).
    tally = count()
    deque(zip(iterable, tally), maxlen=0)
    return next(tally)
def iterate(func, start):
    """Return ``start``, ``func(start)``, ``func(func(start))``, ...

    >>> from itertools import islice
    >>> list(islice(iterate(lambda x: 2*x, 1), 10))
    [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

    """
    current = start
    while True:
        yield current
        current = func(current)
def with_iter(context_manager):
    """Wrap an iterable in a ``with`` statement, so it closes once exhausted.

    For example, this will close the file when the iterator is exhausted::

        upper_lines = (line.upper() for line in with_iter(open('foo')))

    Any context manager which returns an iterable is a candidate for
    ``with_iter``.
    """
    # The with-block stays open until the delegated iteration finishes, so
    # __exit__ runs exactly when the iterable is exhausted (or the generator
    # is closed).
    with context_manager as managed_iterable:
        yield from managed_iterable
def one(iterable, too_short=None, too_long=None):
    """Return the first item from *iterable*, which is expected to contain only
    that item. Raise an exception if *iterable* is empty or has more than one
    item.

    :func:`one` is useful for ensuring that an iterable contains only one item.
    For example, it can be used to retrieve the result of a database query
    that is expected to return a single row.

    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
    different exception with the *too_short* keyword:

    >>> it = []
    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: too many items in iterable (expected 1)'
    >>> too_short = IndexError('too few items')
    >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    IndexError: too few items

    Similarly, if *iterable* contains more than one item, ``ValueError`` will
    be raised. You may specify a different exception with the *too_long*
    keyword:

    >>> it = ['too', 'many']
    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: Expected exactly one item in iterable, but got 'too',
    'many', and perhaps more.
    >>> too_long = RuntimeError
    >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    RuntimeError

    Note that :func:`one` attempts to advance *iterable* twice to ensure there
    is only one item. See :func:`spy` or :func:`peekable` to check iterable
    contents less destructively.
    """
    iterator = iter(iterable)

    # First advance: the iterable must produce at least one item.
    try:
        value = next(iterator)
    except StopIteration:
        raise too_short or ValueError('too few items in iterable (expected 1)')

    # Second advance: the iterable must NOT produce a second item.
    try:
        extra = next(iterator)
    except StopIteration:
        return value

    msg = (
        'Expected exactly one item in iterable, but got {!r}, {!r}, '
        'and perhaps more.'.format(value, extra)
    )
    raise too_long or ValueError(msg)
def distinct_permutations(iterable):
    """Yield successive distinct permutations of the elements in *iterable*.

    >>> sorted(distinct_permutations([1, 0, 1]))
    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]

    Equivalent to ``set(permutations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.

    Duplicate permutations arise when there are duplicated elements in the
    input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
    items input, and each `x_i` is the count of a distinct item in the input
    sequence.
    """

    def _expand(perms, element):
        """Insert *element* into each permutation at every position that
        keeps equal elements in their original relative order.

        Stopping at the first position holding an equal element guarantees
        that no duplicate permutation is ever produced.
        """
        for perm in perms:
            for idx, existing in enumerate(perm):
                yield perm[:idx] + (element,) + perm[idx:]
                if existing == element:
                    break
            else:
                # No equal element encountered: also append at the end.
                yield perm + (element,)

    # Build up lazily: each pass wraps the previous generator.
    result = [()]
    for element in iterable:
        result = _expand(result, element)

    return (tuple(perm) for perm in result)
def intersperse(e, iterable, n=1):
    """Intersperse filler element *e* among the items in *iterable*, leaving
    *n* items between each filler element.

    >>> list(intersperse('!', [1, 2, 3, 4, 5]))
    [1, '!', 2, '!', 3, '!', 4, '!', 5]

    >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
    [1, 2, None, 3, 4, None, 5]

    """
    if n == 0:
        raise ValueError('n must be > 0')

    if n == 1:
        # Weave e before every item, then drop the leading filler:
        # e, x_0, e, x_1, ...  ->  x_0, e, x_1, e, ...
        return islice(interleave(repeat(e), iterable), 1, None)

    # General case: weave single-element filler lists between n-sized
    # chunks, drop the leading filler, and flatten back to items.
    filler = repeat([e])
    chunks = chunked(iterable, n)
    return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
    """Return the elements from each of the input iterables that aren't in the
    other input iterables.

    For example, suppose you have a set of packages, each with a set of
    dependencies::

        {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}

    If you remove one package, which dependencies can also be removed?

    If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
    associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
    ``pkg_2``, and ``D`` is only needed for ``pkg_3``::

    >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
    [['A'], ['C'], ['D']]

    If there are duplicates in one input iterable that aren't in the others
    they will be duplicated in the output. Input order is preserved::

    >>> unique_to_each("mississippi", "missouri")
    [['p', 'p'], ['o', 'u', 'r']]

    It is assumed that the elements of each iterable are hashable.
    """
    materialized = [list(iterable) for iterable in iterables]
    # How many of the inputs contain each element (each input counted once).
    occurrence = Counter(chain.from_iterable(set(pool) for pool in materialized))
    singletons = {element for element, cnt in occurrence.items() if cnt == 1}
    # Preserve each input's own order, keeping only its exclusive elements.
    return [
        [element for element in pool if element in singletons]
        for pool in materialized
    ]
def windowed(seq, n, fillvalue=None, step=1):
    """Return a sliding window of width *n* over the given iterable.

    >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
    >>> list(all_windows)
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    When the window is larger than the iterable, *fillvalue* is used in place
    of missing values::

    >>> list(windowed([1, 2, 3], 4))
    [(1, 2, 3, None)]

    Each window will advance in increments of *step*:

    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]

    An empty *seq* yields nothing:

    >>> list(windowed([], 3))
    []

    To slide into the iterable's items, use :func:`chain` to add filler items
    to the left:

    >>> iterable = [1, 2, 3, 4]
    >>> n = 3
    >>> padding = [None] * (n - 1)
    >>> list(windowed(chain(padding, iterable), 3))
    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    if n == 0:
        yield tuple()
        return
    if step < 1:
        raise ValueError('step must be >= 1')

    window = deque(maxlen=n)
    # `i` counts down to the next emission point: first after `n` items,
    # then after every `step` items.
    i = n
    for _ in map(window.append, seq):
        i -= 1
        if not i:
            i = step
            yield tuple(window)

    size = len(window)
    if size == 0:
        # BUG FIX: the previous implementation pre-filled the window with
        # `fillvalue` before checking for input, so an empty iterable
        # incorrectly produced one all-fillvalue window.
        return
    elif size < n:
        # Shorter than one full window: pad on the right.
        yield tuple(chain(window, repeat(fillvalue, n - size)))
    elif 0 < i < min(step, n):
        # A partial final step remains: pad the tail window.
        window += (fillvalue,) * i
        yield tuple(window)
def substrings(iterable):
    """Yield all of the substrings of *iterable*.

    >>> [''.join(s) for s in substrings('more')]
    ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']

    Note that non-string iterables can also be subdivided.

    >>> list(substrings([0, 1, 2]))
    [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]

    """
    # Emit each length-1 substring while materializing the sequence.
    collected = []
    for element in iterable:
        collected.append(element)
        yield (element,)

    collected = tuple(collected)
    total = len(collected)

    # Then every longer contiguous run, shortest lengths first.
    for size in range(2, total + 1):
        for start in range(total - size + 1):
            yield collected[start : start + size]
def substrings_indexes(seq, reverse=False):
    """Yield all substrings and their positions in *seq*

    The items yielded will be a tuple of the form ``(substr, i, j)``, where
    ``substr == seq[i:j]``.

    This function only works for iterables that support slicing, such as
    ``str`` objects.

    >>> for item in substrings_indexes('more'):
    ...    print(item)
    ('m', 0, 1)
    ('o', 1, 2)
    ('r', 2, 3)
    ('e', 3, 4)
    ('mo', 0, 2)
    ('or', 1, 3)
    ('re', 2, 4)
    ('mor', 0, 3)
    ('ore', 1, 4)
    ('more', 0, 4)

    Set *reverse* to ``True`` to yield the same items in the opposite order.
    """
    # Substring lengths, shortest first (or longest first when reversed).
    lengths = range(1, len(seq) + 1)
    if reverse:
        lengths = reversed(lengths)
    return (
        (seq[start : start + length], start, start + length)
        for length in lengths
        for start in range(len(seq) - length + 1)
    )
class bucket:
    """Wrap *iterable* and return an object that buckets it iterable into
    child iterables based on a *key* function.

    >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
    >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
    >>> sorted(list(s))  # Get the keys
    ['a', 'b', 'c']
    >>> a_iterable = s['a']
    >>> next(a_iterable)
    'a1'
    >>> next(a_iterable)
    'a2'
    >>> list(s['b'])
    ['b1', 'b2', 'b3']

    The original iterable will be advanced and its items will be cached until
    they are used by the child iterables. This may require significant storage.

    By default, attempting to select a bucket to which no items belong will
    exhaust the iterable and cache all values.
    If you specify a *validator* function, selected buckets will instead be
    checked against it.

    >>> from itertools import count
    >>> it = count(1, 2)  # Infinite sequence of odd numbers
    >>> key = lambda x: x % 10  # Bucket by last digit
    >>> validator = lambda x: x in {1, 3, 5, 7, 9}  # Odd digits only
    >>> s = bucket(it, key=key, validator=validator)
    >>> 2 in s
    False
    >>> list(s[2])
    []

    """

    def __init__(self, iterable, key, validator=None):
        # Source iterator, the key function, and per-key FIFO caches of
        # items encountered while searching on behalf of other keys.
        self._it = iter(iterable)
        self._key = key
        self._cache = defaultdict(deque)
        # With no validator, every key is considered valid.
        self._validator = validator or (lambda x: True)

    def __contains__(self, value):
        if not self._validator(value):
            return False

        try:
            item = next(self[value])
        except StopIteration:
            return False
        else:
            # Push the probed item back so membership tests don't consume it.
            self._cache[value].appendleft(item)

        return True

    def _get_values(self, value):
        """
        Helper to yield items from the parent iterator that match *value*.
        Items that don't match are stored in the local cache as they
        are encountered.
        """
        while True:
            # If we've cached some items that match the target value, emit
            # the first one and evict it from the cache.
            if self._cache[value]:
                yield self._cache[value].popleft()
            # Otherwise we need to advance the parent iterator to search for
            # a matching item, caching the rest.
            else:
                while True:
                    try:
                        item = next(self._it)
                    except StopIteration:
                        return
                    item_value = self._key(item)
                    if item_value == value:
                        yield item
                        break
                    elif self._validator(item_value):
                        self._cache[item_value].append(item)

    def __iter__(self):
        # Iterating the bucket itself yields the distinct keys; doing so
        # exhausts the source iterable and caches every valid item.
        for item in self._it:
            item_value = self._key(item)
            if self._validator(item_value):
                self._cache[item_value].append(item)

        yield from self._cache.keys()

    def __getitem__(self, value):
        if not self._validator(value):
            # Invalid keys get an empty iterator instead of a search.
            return iter(())

        return self._get_values(value)
def spy(iterable, n=1):
    """Return a 2-tuple with a list containing the first *n* elements of
    *iterable*, and an iterator with the same items as *iterable*.
    This allows you to "look ahead" at the items in the iterable without
    advancing it.

    There is one item in the list by default:

    >>> iterable = 'abcdefg'
    >>> head, iterable = spy(iterable)
    >>> head
    ['a']
    >>> list(iterable)
    ['a', 'b', 'c', 'd', 'e', 'f', 'g']

    You may use unpacking to retrieve items instead of lists:

    >>> (head,), iterable = spy('abcdefg')
    >>> head
    'a'
    >>> (first, second), iterable = spy('abcdefg', 2)
    >>> first
    'a'
    >>> second
    'b'

    The number of items requested can be larger than the number of items in
    the iterable:

    >>> iterable = [1, 2, 3, 4, 5]
    >>> head, iterable = spy(iterable, 10)
    >>> head
    [1, 2, 3, 4, 5]
    >>> list(iterable)
    [1, 2, 3, 4, 5]

    """
    it = iter(iterable)
    # Pull up to n items (the `take` recipe, inlined)...
    head = list(islice(it, n))
    # ...then glue them back onto the front of the remaining items.
    return head, chain(head, it)
def interleave(*iterables):
    """Return a new iterable yielding from each iterable in turn,
    until the shortest is exhausted.

    >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7]

    For a version that doesn't terminate after the shortest iterable is
    exhausted, see :func:`interleave_longest`.
    """
    # zip stops at the shortest input; flattening its tuples produces the
    # round-robin ordering.
    return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
    """Return a new iterable yielding from each iterable in turn,
    skipping any that are exhausted.

    >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
    [1, 4, 6, 2, 5, 7, 3, 8]

    This function produces the same output as :func:`roundrobin`, but may
    perform better for some inputs (in particular when the number of iterables
    is large).
    """
    # zip_longest pads exhausted inputs with the module sentinel, which is
    # then filtered out by identity.
    flattened = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
    return (element for element in flattened if element is not _marker)
def collapse(iterable, base_type=None, levels=None):
    """Flatten an iterable with multiple levels of nesting (e.g., a list of
    lists of tuples) into non-iterable types.

    >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
    >>> list(collapse(iterable))
    [1, 2, 3, 4, 5, 6]

    Binary and text strings are not considered iterable and
    will not be collapsed.

    To avoid collapsing other types, specify *base_type*:

    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
    >>> list(collapse(iterable, base_type=tuple))
    ['ab', ('cd', 'ef'), 'gh', 'ij']

    Specify *levels* to stop flattening after a certain level:

    >>> iterable = [('a', ['b']), ('c', ['d'])]
    >>> list(collapse(iterable))  # Fully flattened
    ['a', 'b', 'c', 'd']
    >>> list(collapse(iterable, levels=1))  # Only one level flattened
    ['a', ['b'], 'c', ['d']]

    """

    def _descend(node, depth):
        # Stop descending past the requested depth.
        if levels is not None and depth > levels:
            yield node
            return
        # Strings and bytes are treated as atoms, never iterated.
        if isinstance(node, (str, bytes)):
            yield node
            return
        # User-specified atomic types are also left intact.
        if base_type is not None and isinstance(node, base_type):
            yield node
            return

        try:
            children = iter(node)
        except TypeError:
            # Not iterable at all: a leaf value.
            yield node
        else:
            for child in children:
                yield from _descend(child, depth + 1)

    yield from _descend(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
    of items) before yielding the item.

    `func` must be a function that takes a single argument. Its return value
    will be discarded.

    *before* and *after* are optional functions that take no arguments. They
    will be executed before iteration starts and after it ends, respectively.

    `side_effect` can be used for logging, updating progress bars, or anything
    that is not functionally "pure."

    Emitting a status message:

    >>> from more_itertools import consume
    >>> func = lambda item: print('Received {}'.format(item))
    >>> consume(side_effect(func, range(2)))
    Received 0
    Received 1

    Operating on chunks of items:

    >>> pair_sums = []
    >>> func = lambda chunk: pair_sums.append(sum(chunk))
    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
    [0, 1, 2, 3, 4, 5]
    >>> list(pair_sums)
    [1, 5, 9]

    Writing to a file-like object:

    >>> from io import StringIO
    >>> from more_itertools import consume
    >>> f = StringIO()
    >>> func = lambda x: print(x, file=f)
    >>> before = lambda: print(u'HEADER', file=f)
    >>> after = f.close
    >>> it = [u'a', u'b', u'c']
    >>> consume(side_effect(func, it, before=before, after=after))
    >>> f.closed
    True

    """
    try:
        if before is not None:
            before()

        if chunk_size is None:
            # Per-item mode: func sees each item as it passes through.
            for element in iterable:
                func(element)
                yield element
        else:
            # Per-chunk mode: func sees each chunk, items re-emitted flat.
            for group in chunked(iterable, chunk_size):
                func(group)
                yield from group
    finally:
        # Run the teardown hook whether iteration finished, raised, or the
        # generator was closed early.
        if after is not None:
            after()
def sliced(seq, n):
    """Yield consecutive slices of length *n* from the sequence *seq*.

        >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
        [(1, 2, 3), (4, 5, 6)]

    When ``len(seq)`` is not a multiple of *n*, the final slice is shorter:

        >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
        [(1, 2, 3), (4, 5, 6), (7, 8)]

    Requires a sliceable *seq*; for arbitrary iterables use :func:`chunked`.
    """
    # Slicing past the end yields an empty (falsy) slice, which takewhile
    # uses as the stop condition.
    pieces = (seq[start:start + n] for start in count(step=n))
    return takewhile(len, pieces)
def split_at(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*, splitting (and discarding)
    each item for which *pred* returns ``True``.

        >>> list(split_at('abcdcba', lambda x: x == 'b'))
        [['a'], ['c', 'd', 'c'], ['a']]

    At most *maxsplit* splits occur; ``-1`` (the default) means unlimited:

        >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
        [[0], [2], [4, 5, 6, 7, 8, 9]]
    """
    if maxsplit == 0:
        # No splitting permitted: emit everything as a single list.
        yield list(iterable)
        return
    source = iter(iterable)
    group = []
    for element in source:
        if not pred(element):
            group.append(element)
            continue
        yield group
        if maxsplit == 1:
            # Last allowed split: drain the rest as one final group.
            yield list(source)
            return
        group = []
        maxsplit -= 1
    yield group
def split_before(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*, starting a new list just
    before each item for which *pred* returns ``True``.

        >>> list(split_before('OneTwo', lambda s: s.isupper()))
        [['O', 'n', 'e'], ['T', 'w', 'o']]

    At most *maxsplit* splits occur; ``-1`` (the default) means unlimited:

        >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    source = iter(iterable)
    group = []
    for element in source:
        # Only split when there is already something accumulated, so a
        # matching first item doesn't produce a leading empty group.
        if group and pred(element):
            yield group
            if maxsplit == 1:
                yield [element, *source]
                return
            group = []
            maxsplit -= 1
        group.append(element)
    yield group
def split_after(iterable, pred, maxsplit=-1):
    """Yield lists of items from *iterable*, ending each list with an
    item for which *pred* returns ``True``.

        >>> list(split_after('one1two2', lambda s: s.isdigit()))
        [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]

    At most *maxsplit* splits occur; ``-1`` (the default) means unlimited:

        >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
        [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    source = iter(iterable)
    group = []
    for element in source:
        group.append(element)
        # group is never empty here: the element was just appended.
        if pred(element):
            yield group
            if maxsplit == 1:
                yield list(source)
                return
            group = []
            maxsplit -= 1
    # A trailing group only exists when the last item did not match.
    if group:
        yield group
def split_when(iterable, pred, maxsplit=-1):
    """Split *iterable* between successive items ``x, y`` for which
    ``pred(x, y)`` returns ``True``.

    For example, split between descending steps to get increasing runs:

        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
        [[1, 2, 3, 3], [2, 5], [2, 4], [2]]

    At most *maxsplit* splits occur; ``-1`` (the default) means unlimited:

        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
        ...                 lambda x, y: x > y, maxsplit=2))
        [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    source = iter(iterable)
    try:
        previous = next(source)
    except StopIteration:
        # Empty input yields nothing at all (not even an empty list).
        return
    group = [previous]
    for current in source:
        if pred(previous, current):
            yield group
            if maxsplit == 1:
                yield [current, *source]
                return
            group = []
            maxsplit -= 1
        group.append(current)
        previous = current
    yield group
def split_into(iterable, sizes):
    """Yield lists of sequential items from *iterable*, one list per
    integer in *sizes*.

        >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
        [[1], [2, 3], [4, 5, 6]]

    Leftover items (when ``sum(sizes)`` is smaller than the input) are
    dropped; if *sizes* over-runs the input, the overflowing list is short
    and later lists are empty:

        >>> list(split_into([1,2,3,4], [1,2,3,4]))
        [[1], [2, 3], [4], []]

    A ``None`` in *sizes* consumes everything remaining, the same way
    :func:`itertools.islice` treats a ``None`` stop:

        >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
        [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]

    Handy for carving a flat record into variable-width fields.
    """
    # Work on an iterator so each islice call resumes where the previous
    # one stopped (also makes generators safe to pass in).
    stream = iter(iterable)
    for width in sizes:
        if width is None:
            yield list(stream)
            return
        yield list(islice(stream, width))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
    """Yield the items of *iterable*, then enough copies of *fillvalue*
    that at least *n* items have been emitted.

        >>> list(padded([1, 2, 3], '?', 5))
        [1, 2, 3, '?', '?']

    With *next_multiple* set, pad until the total count is a multiple
    of *n*:

        >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
        [1, 2, 3, 4, None, None]

    With *n* left as ``None``, *fillvalue* is emitted forever.
    """
    stream = iter(iterable)
    if n is None:
        yield from chain(stream, repeat(fillvalue))
        return
    if n < 1:
        raise ValueError('n must be at least 1')
    emitted = 0
    for element in stream:
        yield element
        emitted += 1
    # (n - emitted) % n == -emitted % n; without next_multiple a negative
    # pad count simply means nothing more is emitted.
    pad = -emitted % n if next_multiple else n - emitted
    if pad > 0:
        yield from repeat(fillvalue, pad)
def repeat_last(iterable, default=None):
    """Yield every item of *iterable*, then repeat the final one forever.

        >>> list(islice(repeat_last(range(3)), 5))
        [0, 1, 2, 2, 2]

    If *iterable* is empty, repeat *default* forever instead:

        >>> list(islice(repeat_last(range(0), 42), 5))
        [42, 42, 42, 42, 42]
    """
    # The module-level sentinel distinguishes "loop never ran" from a
    # legitimate final value of None.
    item = _marker
    for item in iterable:
        yield item
    final = item if item is not _marker else default
    yield from repeat(final)
def distribute(n, iterable):
    """Deal the items of *iterable* round-robin into *n* child iterables.

        >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
        >>> list(group_1)
        [1, 3, 5]
        >>> list(group_2)
        [2, 4, 6]

    When the input length is not a multiple of *n*, the children differ in
    length; when it is shorter than *n*, trailing children are empty.

    Built on :func:`itertools.tee`, so it may use significant storage.
    If the children must preserve the original contiguous order, use
    :func:`divide` instead.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Child k reads every n-th item starting at offset k from its own
    # tee'd copy of the source.
    copies = tee(iterable, n)
    return [islice(copy, offset, None, n) for offset, copy in enumerate(copies)]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
    """Yield tuples whose ``i``-th element is offset from *iterable* by
    the ``i``-th entry of *offsets*.

        >>> list(stagger([0, 1, 2, 3]))
        [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
        >>> list(stagger(range(8), offsets=(0, 2, 4)))
        [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]

    By default iteration stops when the most-advanced column runs out; set
    *longest* to ``True`` to continue until every column is exhausted,
    padding with *fillvalue* (also used for positions before the start).
    """
    # One tee'd copy per requested offset; zip_offset does the shifting.
    copies = tee(iterable, len(offsets))
    kwargs = {'offsets': offsets, 'longest': longest, 'fillvalue': fillvalue}
    return zip_offset(*copies, **kwargs)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
    """``zip`` the *iterables* together, shifting the ``i``-th one by the
    ``i``-th entry of *offsets*.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]

    A lightweight way to compare lead/lag series without SciPy or pandas.
    By default the result ends with the shortest shifted iterable; pass
    ``longest=True`` to pad with *fillvalue* until the longest ends.

    :raises ValueError: if *iterables* and *offsets* differ in length.
    """
    if len(iterables) != len(offsets):
        raise ValueError("Number of iterables and offsets didn't match")
    shifted = []
    for source, offset in zip(iterables, offsets):
        if offset > 0:
            # Positive offset: skip the first `offset` items.
            shifted.append(islice(source, offset, None))
        elif offset < 0:
            # Negative offset: prepend `-offset` fill values.
            shifted.append(chain(repeat(fillvalue, -offset), source))
        else:
            shifted.append(source)
    if longest:
        return zip_longest(*shifted, fillvalue=fillvalue)
    return zip(*shifted)
def sort_together(iterables, key_list=(0,), reverse=False):
    """Sort the *iterables* in lockstep, using the iterables indexed by
    *key_list* as the sort keys (ties broken left to right). All iterables
    are trimmed to the length of the shortest.

    Like sorting spreadsheet rows by selected columns:

        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
        >>> sort_together(iterables)
        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
        >>> sort_together(iterables, key_list=(1, 2))
        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]

    Set *reverse* to ``True`` for descending order.
    """
    # Transpose to rows, sort the rows by the key columns, transpose back.
    key = itemgetter(*key_list)
    rows = sorted(zip(*iterables), key=key, reverse=reverse)
    return list(zip(*rows))
def unzip(iterable):
    """The inverse of :func:`zip`, this function disaggregates the elements
    of the zipped *iterable*.

    The ``i``-th iterable contains the ``i``-th element from each element
    of the zipped iterable. The first element is used to determine the
    length of the remaining elements.

        >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
        >>> letters, numbers = unzip(iterable)
        >>> list(letters)
        ['a', 'b', 'c', 'd']
        >>> list(numbers)
        [1, 2, 3, 4]

    This is similar to using ``zip(*iterable)``, but it avoids reading
    *iterable* into memory. Note, however, that this function uses
    :func:`itertools.tee` and thus may require significant storage.
    """
    # Peek at the first element (without losing it) to learn how many
    # output iterables to produce.
    head, iterable = spy(iter(iterable))
    if not head:
        # empty iterable, e.g. zip([], [], [])
        return ()
    # spy returns a one-length iterable as head
    head = head[0]
    iterables = tee(iterable, len(head))

    # NOTE: this local helper deliberately shadows operator.itemgetter.
    def itemgetter(i):
        def getter(obj):
            try:
                return obj[i]
            except IndexError:
                # basically if we have an iterable like
                # iter([(1, 2, 3), (4, 5), (6,)])
                # the second unzipped iterable would fail at the third tuple
                # since it would try to access tup[1]
                # same with the third unzipped iterable and the second tuple
                # to support these "improperly zipped" iterables,
                # we create a custom itemgetter
                # which just stops the unzipped iterables
                # at first length mismatch
                # NOTE(review): raising StopIteration from the callable ends
                # the surrounding map() iterator early; this relies on map
                # being a C iterator rather than a generator (PEP 479 does
                # not convert the exception here).
                raise StopIteration
        return getter

    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def divide(n, iterable):
    """Split *iterable* into *n* contiguous parts, preserving order.

        >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
        >>> list(group_1)
        [1, 2, 3]
        >>> list(group_2)
        [4, 5, 6]

    Parts may differ in length when the input is not evenly divisible,
    and trailing parts are empty when the input is shorter than *n*.

    The whole input is materialized before returning, which may require
    significant storage; if order doesn't matter, :func:`distribute`
    avoids that.
    """
    if n < 1:
        raise ValueError('n must be at least 1')

    # Use the input directly if it is sliceable; otherwise snapshot it.
    try:
        iterable[:0]
    except TypeError:
        seq = tuple(iterable)
    else:
        seq = iterable

    # The first `leftover` parts each get one extra item.
    size, leftover = divmod(len(seq), n)
    parts = []
    upper = 0
    for index in range(n):
        lower = upper
        upper += size + (1 if index < leftover else 0)
        parts.append(iter(seq[lower:upper]))
    return parts
def always_iterable(obj, base_type=(str, bytes)):
    """Return an iterator over *obj* if it is iterable, or over the
    one-element tuple ``(obj,)`` if it is not.

        >>> list(always_iterable((1, 2, 3)))
        [1, 2, 3]
        >>> list(always_iterable(1))
        [1]

    ``None`` maps to an empty iterator. Instances of *base_type*
    (``str``/``bytes`` by default) are treated as non-iterable units:

        >>> list(always_iterable('foo'))
        ['foo']
        >>> list(always_iterable({'a': 1}, base_type=dict))
        [{'a': 1}]

    Pass ``base_type=None`` to disable the special-casing entirely.
    """
    if obj is None:
        return iter(())

    # Decide whether obj must be wrapped rather than iterated directly.
    treat_as_unit = (base_type is not None) and isinstance(obj, base_type)
    if not treat_as_unit:
        try:
            return iter(obj)
        except TypeError:
            pass
    return iter((obj,))
def adjacent(predicate, iterable, distance=1):
    """Yield ``(bool, item)`` pairs, where the flag is ``True`` when the
    item satisfies *predicate* or lies within *distance* positions of one
    that does.

        >>> list(adjacent(lambda x: x == 3, range(6)))
        [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
        >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
        [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]

    Useful for adding context around search hits (e.g. surrounding lines
    in a diff). *predicate* is called exactly once per item. Pair with
    :func:`groupby_transform` to group runs of equal flags.
    """
    # distance=0 is allowed so results can be checked against plain map().
    if distance < 0:
        raise ValueError('distance must be at least 0')

    hits_source, output_source = tee(iterable)
    # Pad both ends so the sliding window is full even at the boundaries.
    pad = [False] * distance
    hits = chain(pad, map(predicate, hits_source), pad)
    flags = map(any, windowed(hits, 2 * distance + 1))
    return zip(flags, output_source)
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
    """Like :func:`itertools.groupby`, but additionally maps *valuefunc*
    over the items of each group.

        >>> iterable = 'AaaABbBCcA'
        >>> grouper = groupby_transform(iterable, lambda x: x.upper(),
        ...                             lambda x: x.lower())
        >>> [(k, ''.join(g)) for k, g in grouper]
        [('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]

    Both callables default to the identity. A common use is grouping one
    iterable by a parallel key iterable: ``zip`` them, then pass
    ``itemgetter(0)`` as *keyfunc* and ``itemgetter(1)`` as *valuefunc*.

    As with ``groupby``, only adjacent items are grouped; sort the input
    by the key function first to merge duplicate groups.
    """
    grouped = groupby(iterable, keyfunc)
    if not valuefunc:
        return grouped
    # Lazily transform each group's items; groups stay lazy iterators.
    return ((key, map(valuefunc, members)) for key, members in grouped)
class numeric_range(abc.Sequence, abc.Hashable):
    """An extension of the built-in ``range()`` function whose arguments can
    be any orderable numeric type.

    With only *stop* specified, *start* defaults to ``0`` and *step*
    defaults to ``1``. The output items will match the type of *stop*:

        >>> list(numeric_range(3.5))
        [0.0, 1.0, 2.0, 3.0]

    With only *start* and *stop* specified, *step* defaults to ``1``. The
    output items will match the type of *start*:

        >>> from decimal import Decimal
        >>> start = Decimal('2.1')
        >>> stop = Decimal('5.1')
        >>> list(numeric_range(start, stop))
        [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]

    With *start*, *stop*, and *step* specified the output items will match
    the type of ``start + step``:

        >>> from fractions import Fraction
        >>> start = Fraction(1, 2)  # Start at 1/2
        >>> stop = Fraction(5, 2)  # End at 5/2
        >>> step = Fraction(1, 2)  # Count by 1/2
        >>> list(numeric_range(start, stop, step))
        [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]

    If *step* is zero, ``ValueError`` is raised. Negative steps are supported:

        >>> list(numeric_range(3, -1, -1.0))
        [3.0, 2.0, 1.0, 0.0]

    Be aware of the limitations of floating point numbers; the representation
    of the yielded numbers may be surprising.

    ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
    is a ``datetime.timedelta`` object:

        >>> import datetime
        >>> start = datetime.datetime(2019, 1, 1)
        >>> stop = datetime.datetime(2019, 1, 3)
        >>> step = datetime.timedelta(days=1)
        >>> items = iter(numeric_range(start, stop, step))
        >>> next(items)
        datetime.datetime(2019, 1, 1, 0, 0)
        >>> next(items)
        datetime.datetime(2019, 1, 2, 0, 0)
    """
    # Shared hash for all empty ranges, matching hash(range(0, 0)) so that
    # any two empty numeric_ranges hash alike (they also compare equal).
    _EMPTY_HASH = hash(range(0, 0))

    def __init__(self, *args):
        # Mirror range()'s calling conventions: (stop) / (start, stop) /
        # (start, stop, step). Defaults are constructed from the argument
        # types so Decimal, Fraction, and datetime/timedelta all work.
        argc = len(args)
        if argc == 1:
            self._stop, = args
            self._start = type(self._stop)(0)
            self._step = type(self._stop - self._start)(1)
        elif argc == 2:
            self._start, self._stop = args
            self._step = type(self._stop - self._start)(1)
        elif argc == 3:
            self._start, self._stop, self._step = args
        elif argc == 0:
            raise TypeError('numeric_range expected at least '
                            '1 argument, got {}'.format(argc))
        else:
            raise TypeError('numeric_range expected at most '
                            '3 arguments, got {}'.format(argc))

        # A zero of the step's type, used for sign tests and modulo checks.
        self._zero = type(self._step)(0)
        if self._step == self._zero:
            raise ValueError('numeric_range() arg 3 must not be zero')
        # True when counting upward (positive step).
        self._growing = self._step > self._zero
        self._init_len()

    def __bool__(self):
        # Non-empty iff there is at least one value between start and stop
        # in the direction of the step.
        if self._growing:
            return self._start < self._stop
        else:
            return self._start > self._stop

    def __contains__(self, elem):
        # Membership requires being inside the half-open interval AND
        # landing exactly on a step multiple from start.
        if self._growing:
            if self._start <= elem < self._stop:
                return (elem - self._start) % self._step == self._zero
        else:
            if self._start >= elem > self._stop:
                return (self._start - elem) % (-self._step) == self._zero

        return False

    def __eq__(self, other):
        if isinstance(other, numeric_range):
            empty_self = not bool(self)
            empty_other = not bool(other)
            if empty_self or empty_other:
                return empty_self and empty_other  # True if both empty
            else:
                # Equal when start, step, and last element all agree
                # (stop itself may differ, as with built-in range).
                return (self._start == other._start
                        and self._step == other._step
                        and self._get_by_index(-1) == other._get_by_index(-1))
        else:
            return False

    def __getitem__(self, key):
        if isinstance(key, int):
            return self._get_by_index(key)
        elif isinstance(key, slice):
            # Slicing returns a new numeric_range; clamp out-of-bounds
            # slice endpoints the way built-in sequences do.
            step = self._step if key.step is None else key.step * self._step

            if key.start is None or key.start <= -self._len:
                start = self._start
            elif key.start >= self._len:
                start = self._stop
            else:  # -self._len < key.start < self._len
                start = self._get_by_index(key.start)

            if key.stop is None or key.stop >= self._len:
                stop = self._stop
            elif key.stop <= -self._len:
                stop = self._start
            else:  # -self._len < key.stop < self._len
                stop = self._get_by_index(key.stop)

            return numeric_range(start, stop, step)
        else:
            raise TypeError(
                'numeric range indices must be '
                'integers or slices, not {}'.format(type(key).__name__))

    def __hash__(self):
        # Hash on (start, last, step) so ranges that compare equal hash
        # equal; all empty ranges share _EMPTY_HASH.
        if self:
            return hash((self._start, self._get_by_index(-1), self._step))
        else:
            return self._EMPTY_HASH

    def __iter__(self):
        values = (self._start + (n * self._step) for n in count())
        # takewhile(partial(gt, stop), v) keeps values while stop > v
        # (growing case); the lt form handles the shrinking case.
        if self._growing:
            return takewhile(partial(gt, self._stop), values)
        else:
            return takewhile(partial(lt, self._stop), values)

    def __len__(self):
        return self._len

    def _init_len(self):
        # Normalize to a positive-step orientation, then do Euclidean
        # division; a nonzero remainder adds one final element.
        if self._growing:
            start = self._start
            stop = self._stop
            step = self._step
        else:
            start = self._stop
            stop = self._start
            step = -self._step
        distance = stop - start
        if distance <= self._zero:
            self._len = 0
        else:  # distance > 0 and step > 0: regular euclidean division
            q, r = divmod(distance, step)
            self._len = int(q) + int(r != self._zero)

    def __reduce__(self):
        # Pickle support: rebuild from the three constructor arguments.
        return numeric_range, (self._start, self._stop, self._step)

    def __repr__(self):
        if self._step == 1:
            return "numeric_range({}, {})".format(repr(self._start),
                                                  repr(self._stop))
        else:
            return "numeric_range({}, {}, {})".format(repr(self._start),
                                                      repr(self._stop),
                                                      repr(self._step))

    def __reversed__(self):
        # Reversed range runs from the last element back past start.
        return iter(numeric_range(self._get_by_index(-1),
                                  self._start - self._step, -self._step))

    def count(self, value):
        # Each value occurs at most once, so count is 0 or 1.
        return int(value in self)

    def index(self, value):
        # Index is the step-multiple offset; only exact members have one.
        if self._growing:
            if self._start <= value < self._stop:
                q, r = divmod(value - self._start, self._step)
                if r == self._zero:
                    return int(q)
        else:
            if self._start >= value > self._stop:
                q, r = divmod(self._start - value, -self._step)
                if r == self._zero:
                    return int(q)

        raise ValueError("{} is not in numeric range".format(value))

    def _get_by_index(self, i):
        # Support negative indices like built-in sequences.
        if i < 0:
            i += self._len
        if i < 0 or i >= self._len:
            raise IndexError("numeric range object index out of range")
        return self._start + i * self._step
def count_cycle(iterable, n=None):
    """Cycle through *iterable* up to *n* times (forever when *n* is
    ``None``), pairing each item with the number of completed cycles.

        >>> list(count_cycle('AB', 3))
        [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
    """
    # Snapshot the input so it can be replayed each cycle.
    items = tuple(iterable)
    if not items:
        return iter(())
    rounds = count() if n is None else range(n)
    return ((cycle_number, item) for cycle_number in rounds for item in items)
def locate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred*
    returns ``True``. *pred* defaults to :func:`bool`, selecting truthy
    items:

        >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
        [1, 2, 4]
        >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
        [1, 3]

    With *window_size* set, *pred* is called with that many consecutive
    items at a time, enabling sub-sequence searches:

        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
        >>> pred = lambda *args: args == (1, 2, 3)
        >>> list(locate(iterable, pred=pred, window_size=3))
        [1, 5, 9]

    Combine with :func:`seekable` to jump to located items lazily.
    """
    if window_size is None:
        # compress() keeps exactly the counter values where pred is truthy.
        return compress(count(), map(pred, iterable))

    if window_size < 1:
        raise ValueError('window size must be at least 1')
    # Trailing windows are padded with the module sentinel so pred sees a
    # full window_size-tuple of arguments every time.
    windows = windowed(iterable, window_size, fillvalue=_marker)
    return compress(count(), starmap(pred, windows))
def lstrip(iterable, pred):
    """Yield the items of *iterable*, skipping the leading run of items
    for which *pred* returns ``True``.

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(lstrip(iterable, pred))
        [1, 2, None, 3, False, None]

    The analogue of :func:`str.lstrip`; a thin wrapper around
    :func:`itertools.dropwhile`.
    """
    return dropwhile(pred, iterable)
def rstrip(iterable, pred):
    """Yield the items of *iterable*, dropping the trailing run of items
    for which *pred* returns ``True``.

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(rstrip(iterable, pred))
        [None, False, None, 1, 2, None, 3]

    The analogue of :func:`str.rstrip`.
    """
    # Matching items are held back; they are only released once a
    # non-matching item proves they were not the trailing run.
    pending = []
    for element in iterable:
        if pred(element):
            pending.append(element)
        else:
            if pending:
                yield from pending
                pending.clear()
            yield element
def strip(iterable, pred):
    """Yield the items of *iterable*, dropping both the leading and the
    trailing runs of items for which *pred* returns ``True``.

        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
        >>> pred = lambda x: x in {None, False, ''}
        >>> list(strip(iterable, pred))
        [1, 2, None, 3]

    The analogue of :func:`str.strip`.
    """
    # Trim the front first, then the back of what remains.
    front_trimmed = lstrip(iterable, pred)
    return rstrip(front_trimmed, pred)
def islice_extended(iterable, *args):
    """An extension of :func:`itertools.islice` that supports negative values
    for *stop*, *start*, and *step*.

        >>> iterable = iter('abcdefgh')
        >>> list(islice_extended(iterable, -4, -1))
        ['e', 'f', 'g']

    Slices with negative values require some caching of *iterable*, but this
    function takes care to minimize the amount of memory required.

    For example, you can use a negative step with an infinite iterator:

        >>> from itertools import count
        >>> list(islice_extended(count(), 110, 99, -2))
        [110, 108, 106, 104, 102, 100]
    """
    # Reuse slice() to get range-style argument normalization for free.
    s = slice(*args)
    start = s.start
    stop = s.stop
    if s.step == 0:
        raise ValueError('step argument must be a non-zero integer or None.')
    step = s.step or 1

    it = iter(iterable)

    if step > 0:
        start = 0 if (start is None) else start

        if start < 0:
            # Consume all but the last -start items
            # (enumerate from 1 lets cache[-1][0] double as the total
            # number of items consumed).
            cache = deque(enumerate(it, 1), maxlen=-start)
            len_iter = cache[-1][0] if cache else 0

            # Adjust start to be positive
            i = max(len_iter + start, 0)

            # Adjust stop to be positive
            if stop is None:
                j = len_iter
            elif stop >= 0:
                j = min(stop, len_iter)
            else:
                j = max(len_iter + stop, 0)

            # Slice the cache
            n = j - i
            if n <= 0:
                return

            for index, item in islice(cache, 0, n, step):
                yield item
        elif (stop is not None) and (stop < 0):
            # Advance to the start position
            next(islice(it, start, start), None)

            # When stop is negative, we have to carry -stop items while
            # iterating
            cache = deque(islice(it, -stop), maxlen=-stop)

            for index, item in enumerate(it):
                cached_item = cache.popleft()
                if index % step == 0:
                    yield cached_item
                cache.append(item)
        else:
            # When both start and stop are positive we have the normal case
            yield from islice(it, start, stop, step)
    else:
        start = -1 if (start is None) else start

        if (stop is not None) and (stop < 0):
            # Consume all but the last items
            n = -stop - 1
            cache = deque(enumerate(it, 1), maxlen=n)
            len_iter = cache[-1][0] if cache else 0

            # If start and stop are both negative they are comparable and
            # we can just slice. Otherwise we can adjust start to be negative
            # and then slice.
            if start < 0:
                i, j = start, stop
            else:
                i, j = min(start - len_iter, -1), None

            for index, item in list(cache)[i:j:step]:
                yield item
        else:
            # Advance to the stop position
            if stop is not None:
                m = stop + 1
                next(islice(it, m, m), None)

            # stop is positive, so if start is negative they are not comparable
            # and we need the rest of the items.
            if start < 0:
                i = start
                n = None
            # stop is None and start is positive, so we just need items up to
            # the start index.
            elif stop is None:
                i = None
                n = start + 1
            # Both stop and start are positive, so they are comparable.
            else:
                i = None
                n = start - stop
                if n <= 0:
                    return

            cache = list(islice(it, n))

            # Reverse by stepping backward over the cached prefix.
            yield from cache[i::step]
def always_reversible(iterable):
    """Like :func:`reversed`, but works on any iterable — not only those
    implementing the ``Reversible`` or ``Sequence`` protocols.

        >>> print(*always_reversible(x for x in range(3)))
        2 1 0

    A reversible input is handed straight to :func:`reversed`; otherwise
    the remaining items are cached (possibly using significant storage)
    and yielded in reverse.
    """
    try:
        return reversed(iterable)
    except TypeError:
        pass
    # Not reversible: materialize, then reverse the copy.
    return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
    """Yield groups of consecutive items from *iterable*.

    *ordering* maps each item to its position; items whose positions
    differ by exactly 1 are grouped. The default identity ordering finds
    runs of consecutive numbers:

        >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
        >>> [list(g) for g in consecutive_groups(iterable)]
        [[1], [10, 11, 12], [20], [30, 31, 32, 33], [40]]

    For runs of adjacent letters, use e.g. ``ascii_lowercase.index`` as
    the ordering function.

    Each yielded group is a lazy iterator sharing the underlying source;
    copy a group (e.g. into a ``list``) before advancing to the next one
    if you need to keep its elements.
    """
    def run_key(pair):
        # Consecutive items share a constant (enumeration index - position).
        index, item = pair
        return index - ordering(item)

    for _, run in groupby(enumerate(iterable), key=run_key):
        yield map(itemgetter(1), run)
def difference(iterable, func=sub, *, initial=None):
    """Compute first differences of *iterable* — by default with
    :func:`operator.sub`, undoing :func:`itertools.accumulate`:

        >>> list(difference([0, 1, 3, 6, 10]))
        [0, 1, 2, 3, 4]

    A different *func* is applied as
    ``A, B, C, ... --> A, func(B, A), func(C, B), ...``; e.g. progressive
    division with ``lambda x, y: x // y``.

    If *initial* is not ``None`` (mirroring ``accumulate``'s *initial*
    argument, Python 3.8+), the first element is omitted from the output:

        >>> list(difference([100, 101, 103, 106], initial=100))
        [1, 2, 3]
    """
    trailing, leading = tee(iterable)
    try:
        # The very first item passes through unchanged (unless initial
        # is given); advancing `leading` also sets up the pairwise zip.
        head = [next(leading)]
    except StopIteration:
        return iter([])
    if initial is not None:
        head = []

    # zip(leading, trailing) pairs each item with its predecessor, and
    # func receives them as (current, previous).
    diffs = starmap(func, zip(leading, trailing))
    return chain(head, diffs)
class SequenceView(Sequence):
    """A dynamic, read-only view of the sequence *target*.

    Analogous to Python's built-in "dictionary view" types: when the
    underlying sequence changes, so does the view.

    >>> seq = ['0', '1', '2']
    >>> view = SequenceView(seq)
    >>> seq.append('3')
    >>> view
    SequenceView(['0', '1', '2', '3'])

    Indexing, slicing, and length queries behave like the underlying
    sequence; assignment is not allowed.  Views are a cheap alternative
    to copying, since they require (almost) no extra storage.
    """

    def __init__(self, target):
        # Only genuine sequences may be wrapped.
        if not isinstance(target, Sequence):
            raise TypeError
        self._target = target

    def __getitem__(self, index):
        return self._target[index]

    def __len__(self):
        return len(self._target)

    def __repr__(self):
        return '{}({})'.format(type(self).__name__, repr(self._target))
class seekable:
    """Wrap an iterator to allow seeking backward and forward.

    Consumed items are cached progressively so earlier positions can be
    revisited via :meth:`seek`.  Seeking to ``0`` "resets" the iterator;
    seeking past the end simply exhausts the source.  The cached items
    are exposed through :meth:`elements` as a live :class:`SequenceView`.

    By default the cache grows without bound, so beware of wrapping very
    large or infinite iterables.  Supply *maxlen* to bound the cache
    (which also bounds how far back you can seek).
    """

    def __init__(self, iterable, maxlen=None):
        self._source = iter(iterable)
        # Unbounded list cache by default; bounded deque when maxlen given.
        self._cache = [] if maxlen is None else deque([], maxlen)
        self._index = None  # None means: read live from the source

    def __iter__(self):
        return self

    def __next__(self):
        # While replaying, serve items from the cache until it runs out.
        if self._index is not None:
            try:
                item = self._cache[self._index]
            except IndexError:
                # Cache exhausted; switch back to the live source.
                self._index = None
            else:
                self._index += 1
                return item

        item = next(self._source)
        self._cache.append(item)
        return item

    def elements(self):
        """Return a live :class:`SequenceView` of the cached items."""
        return SequenceView(self._cache)

    def seek(self, index):
        """Move the read position to *index* (an offset into the cache)."""
        self._index = index
        shortfall = index - len(self._cache)
        if shortfall > 0:
            # Target lies beyond the cache: pull items from the source.
            consume(self, shortfall)
class run_length:
    """Namespace for run-length :meth:`encode` and :meth:`decode`.

    :func:`run_length.encode` compresses an iterable, yielding each run
    of repeated items as an ``(item, count)`` pair:

    >>> list(run_length.encode('abbcccdddd'))
    [('a', 1), ('b', 2), ('c', 3), ('d', 4)]

    :func:`run_length.decode` expands such pairs back into a flat stream:

    >>> list(run_length.decode([('a', 1), ('b', 2), ('c', 3), ('d', 4)]))
    ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
    """

    @staticmethod
    def encode(iterable):
        # Count each run of equal consecutive items.
        return ((item, sum(1 for _ in run)) for item, run in groupby(iterable))

    @staticmethod
    def decode(iterable):
        # Expand each (item, count) pair back into `count` copies of item.
        return chain.from_iterable(repeat(item, count) for item, count in iterable)
def exactly_n(iterable, n, predicate=bool):
    """Return ``True`` if exactly *n* items in the iterable are ``True``
    according to the *predicate* function.

    >>> exactly_n([True, True, False], 2)
    True
    >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
    True

    The iterable is advanced only until ``n + 1`` truthy items have been
    seen, but avoid calling this on infinite iterables.
    """
    # Pull at most n + 1 matches; exactly n means the (n+1)-th never appears.
    matches = islice(filter(predicate, iterable), n + 1)
    return sum(1 for _ in matches) == n
def circular_shifts(iterable):
    """Return a list of all circular shifts (rotations) of *iterable*,
    each as a tuple.

    >>> circular_shifts(range(4))
    [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
    """
    items = list(iterable)
    # Rotation by i is simply the tail items[i:] followed by the head.
    return [tuple(items[i:] + items[:i]) for i in range(len(items))]
def make_decorator(wrapping_func, result_index=0):
    """Return a decorator factory built from *wrapping_func*, a function
    that modifies an iterable.  *result_index* is the position in that
    function's signature where the decorated function's return value is
    inserted.

    This lets you use itertools on the "production end," i.e. at function
    definition, augmenting what the function returns without changing its
    code.  For example, to produce a decorator version of :func:`chunked`:

    >>> from more_itertools import chunked
    >>> chunker = make_decorator(chunked, result_index=0)
    >>> @chunker(3)
    ... def iter_range(n):
    ...     return iter(range(n))
    ...
    >>> list(iter_range(9))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8]]

    To only allow truthy items to be returned:

    >>> truth_serum = make_decorator(filter, result_index=1)
    >>> @truth_serum(bool)
    ... def boolean_test():
    ...     return [0, 1, '', ' ', False, True]
    ...
    >>> list(boolean_test())
    [1, ' ', True]
    """
    # See https://sites.google.com/site/bbayles/index/decorator_factory for
    # notes on how this works.
    def decorator(*wrapping_args, **wrapping_kwargs):
        def outer_wrapper(f):
            def inner_wrapper(*args, **kwargs):
                result = f(*args, **kwargs)
                # Splice the result into wrapping_func's positional args
                # at result_index (equivalent to list.insert semantics).
                spliced = (
                    list(wrapping_args[:result_index])
                    + [result]
                    + list(wrapping_args[result_index:])
                )
                return wrapping_func(*spliced, **wrapping_kwargs)

            return inner_wrapper

        return outer_wrapper

    return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
    """Return a dictionary mapping categories (as computed by *keyfunc*)
    to the items of *iterable*, transformed by *valuefunc* and summarized
    by *reducefunc*.

    *valuefunc* defaults to the identity function; when *reducefunc* is
    unspecified, each category maps to the list of its (transformed)
    items:

    >>> sorted(map_reduce('abbccc', lambda x: x.upper()).items())
    [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]

    With *valuefunc* and *reducefunc*:

    >>> sorted(map_reduce('abbccc', lambda x: x.upper(), lambda x: 1, sum).items())
    [('A', 1), ('B', 2), ('C', 3)]

    Note that all items in the iterable are gathered into lists before
    the summarization step, which may require significant storage.

    The returned object is a :obj:`collections.defaultdict` whose
    ``default_factory`` is set to ``None``, so it behaves like a normal
    dictionary (missing keys raise ``KeyError``).
    """
    if valuefunc is None:
        valuefunc = lambda x: x

    groups = defaultdict(list)
    for item in iterable:
        groups[keyfunc(item)].append(valuefunc(item))

    if reducefunc is not None:
        # Collapse each category's list into a single summary value.
        for key in groups:
            groups[key] = reducefunc(groups[key])

    groups.default_factory = None
    return groups
def rlocate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred* returns
    ``True``, starting from the right and moving left.

    *pred* defaults to :func:`bool`, selecting truthy items:

    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
    [4, 2, 1]

    If *window_size* is given, *pred* is called with that many items at a
    time, enabling searches for sub-sequences (see :func:`locate`).

    If *iterable* is reversible (and no *window_size* is given), it is
    reversed and searched from the right lazily.  Otherwise the matches
    are found from the left, materialized, and returned in reverse order
    — so nothing is ever returned for infinite iterables.
    """
    if window_size is None:
        try:
            total = len(iterable)
            return (total - i - 1 for i in locate(reversed(iterable), pred))
        except TypeError:
            # Not sized/reversible; fall through to the generic path.
            pass

    return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
    """Yield the items from *iterable*, replacing the items for which *pred*
    returns ``True`` with the items from the iterable *substitutes*.

    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
    >>> pred = lambda x: x == 0
    >>> substitutes = (2, 3)
    >>> list(replace(iterable, pred, substitutes))
    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]

    If *count* is given, the number of replacements will be limited:

    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
    >>> pred = lambda x: x == 0
    >>> substitutes = [None]
    >>> list(replace(iterable, pred, substitutes, count=2))
    [1, 1, None, 1, 1, None, 1, 1, 0]

    Use *window_size* to control the number of items passed as arguments to
    *pred*. This allows for locating and replacing subsequences.

    >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
    >>> window_size = 3
    >>> pred = lambda *args: args == (0, 1, 2)  # 3 items passed to pred
    >>> substitutes = [3, 4]  # Splice in these items
    >>> list(replace(iterable, pred, substitutes, window_size=window_size))
    [3, 4, 5, 3, 4, 5]

    Raises ``ValueError`` if *window_size* is smaller than 1.
    """
    if window_size < 1:
        raise ValueError('window_size must be at least 1')

    # Save the substitutes iterable, since it's used more than once
    substitutes = tuple(substitutes)

    # Add padding such that the number of windows matches the length of the
    # iterable
    it = chain(iterable, [_marker] * (window_size - 1))
    windows = windowed(it, window_size)

    n = 0  # number of replacements made so far (bounded by `count`)
    for w in windows:
        # If the current window matches our predicate (and we haven't hit
        # our maximum number of replacements), splice in the substitutes
        # and then consume the following windows that overlap with this one.
        # For example, if the iterable is (0, 1, 2, 3, 4...)
        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
        if pred(*w):
            if (count is None) or (n < count):
                n += 1
                yield from substitutes
                consume(windows, window_size - 1)
                continue

        # If there was no match (or we've reached the replacement limit),
        # yield the first item from the window.
        if w and (w[0] is not _marker):
            yield w[0]
def partitions(iterable):
    """Yield all possible order-preserving partitions of *iterable*.

    >>> for part in partitions('abc'):
    ...     print([''.join(p) for p in part])
    ['abc']
    ['a', 'bc']
    ['ab', 'c']
    ['a', 'b', 'c']

    This is unrelated to :func:`partition`.
    """
    sequence = list(iterable)
    n = len(sequence)
    # Every subset of the interior indexes 1..n-1 is a set of cut points;
    # cutting the sequence at those points yields one partition.
    for cut_points in powerset(range(1, n)):
        bounds = (0,) + tuple(cut_points) + (n,)
        yield [sequence[i:j] for i, j in zip(bounds, bounds[1:])]
def set_partitions(iterable, k=None):
    """Yield the set partitions of *iterable* into *k* parts.  Set
    partitions are not order-preserving.

    >>> for part in set_partitions('abc', 2):
    ...     print([''.join(p) for p in part])
    ['a', 'bc']
    ['ab', 'c']
    ['b', 'ac']

    If *k* is not given, every set partition is generated (for 1 up to
    ``len(iterable)`` parts).  Raises ``ValueError`` when *k* is less
    than 1; yields nothing when *k* exceeds the number of items.
    """
    items = list(iterable)
    n = len(items)
    if k is not None:
        if k < 1:
            raise ValueError(
                "Can't partition in a negative or zero number of groups"
            )
        elif k > n:
            return

    def _helper(seq, parts):
        size = len(seq)
        if parts == 1:
            yield [seq]
        elif size == parts:
            yield [[x] for x in seq]
        else:
            head, *tail = seq
            # Either `head` forms a singleton group...
            for partition in _helper(tail, parts - 1):
                yield [[head], *partition]
            # ...or it joins each group of a smaller partition in turn.
            for partition in _helper(tail, parts):
                for i in range(len(partition)):
                    yield (
                        partition[:i]
                        + [[head] + partition[i]]
                        + partition[i + 1 :]
                    )

    if k is None:
        for parts in range(1, n + 1):
            yield from _helper(items, parts)
    else:
        yield from _helper(items, k)
def time_limited(limit_seconds, iterable):
    """Yield items from *iterable* until *limit_seconds* have passed.

    >>> from time import sleep
    >>> def generator():
    ...     yield 1
    ...     yield 2
    ...     sleep(0.2)
    ...     yield 3
    >>> list(time_limited(0.1, generator()))
    [1, 2]

    The clock is checked before each item is yielded; iteration stops as
    soon as the elapsed time exceeds *limit_seconds*.  If producing the
    first item takes longer than the limit, the call still blocks until
    that item is ready (and then yields nothing).

    Raises ``ValueError`` when *limit_seconds* is negative.
    """
    if limit_seconds < 0:
        raise ValueError('limit_seconds must be positive')

    started = monotonic()
    for item in iterable:
        if monotonic() - started > limit_seconds:
            break
        yield item
def only(iterable, default=None, too_long=None):
    """Return the single item of *iterable*.

    If it has zero items, return *default*.  If it has more than one
    item, raise the exception given by *too_long* (``ValueError`` by
    default).

    >>> only([], default='missing')
    'missing'
    >>> only([1])
    1

    Note that :func:`only` attempts to advance *iterable* twice to ensure
    there is only one item.  See :func:`spy` or :func:`peekable` to check
    iterable contents less destructively.
    """
    it = iter(iterable)
    first_value = next(it, default)

    # Any further item means the iterable was too long.
    for second_value in it:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)

    return first_value
def ichunked(iterable, n):
    """Break *iterable* into sub-iterables with *n* elements each.

    Like :func:`chunked`, but yields iterables instead of lists.  Reading
    the sub-iterables in order keeps memory use constant; reading them
    out of order caches elements via :func:`itertools.tee` as necessary.

    >>> from itertools import count
    >>> chunks = ichunked(count(), 4)
    >>> c_1, c_2 = next(chunks), next(chunks)
    >>> list(c_2)  # c_1's elements are cached for later
    [4, 5, 6, 7]
    >>> list(c_1)
    [0, 1, 2, 3]
    """
    sentinel = object()
    source = iter(iterable)
    while True:
        # Peek at one item to detect exhaustion of the source.
        head = next(source, sentinel)
        if head is sentinel:
            return

        # Clone the stream: hand out an n-length slice, keep the rest.
        source, clone = tee(chain([head], source))
        yield islice(clone, n)

        # Skip past the chunk just handed out.
        for _ in islice(source, n):
            pass
def distinct_combinations(iterable, r):
    """Yield the distinct combinations of *r* items taken from *iterable*.

    >>> list(distinct_combinations([0, 0, 1], 2))
    [(0, 0), (0, 1)]

    Equivalent to ``set(combinations(iterable))``, except duplicates are
    never generated in the first place, which is much more efficient for
    larger inputs.  Raises ``ValueError`` when *r* is negative.
    """
    if r < 0:
        raise ValueError('r must be non-negative')
    if r == 0:
        yield ()
        return

    pool = tuple(iterable)
    # Recurse: for each first occurrence of a value, pick it as the head
    # and combine with distinct (r-1)-combinations of the remainder.
    for i, head in unique_everseen(enumerate(pool), key=itemgetter(1)):
        for tail in distinct_combinations(pool[i + 1 :], r - 1):
            yield (head,) + tail
def filter_except(validator, iterable, *exceptions):
    """Yield the items from *iterable* for which the *validator* function
    does not raise one of the specified *exceptions*.

    *validator* is called once per item; it should accept one argument
    and raise an exception when the item is invalid.

    >>> iterable = ['1', '2', 'three', '4', None]
    >>> list(filter_except(int, iterable, ValueError, TypeError))
    ['1', '2', '4']

    Exceptions not listed in *exceptions* propagate normally.
    """
    caught = tuple(exceptions)
    for item in iterable:
        try:
            validator(item)
        except caught:
            # Invalid item: skip it silently.
            continue
        yield item
def map_except(function, iterable, *exceptions):
    """Transform each item from *iterable* with *function* and yield the
    result, skipping items for which *function* raises one of the
    specified *exceptions*.

    *function* is called once per item and should accept one argument.

    >>> iterable = ['1', '2', 'three', '4', None]
    >>> list(map_except(int, iterable, ValueError, TypeError))
    [1, 2, 4]

    Exceptions not listed in *exceptions* propagate normally.
    """
    caught = tuple(exceptions)
    for item in iterable:
        try:
            yield function(item)
        except caught:
            # Transformation failed with an expected error: skip the item.
            pass
def _sample_unweighted(iterable, k):
    """Uniform reservoir sampling: return up to *k* items chosen at random
    (without replacement) from *iterable*, using O(k) memory.

    If *iterable* yields fewer than *k* items, all of them are returned.
    Not deterministic; draws from the global ``random()``.
    """
    # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
    # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".

    # Fill up the reservoir (collection of samples) with the first `k` samples
    reservoir = take(k, iterable)

    # Generate random number that's the largest in a sample of k U(0,1) numbers
    # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
    W = exp(log(random()) / k)

    # The number of elements to skip before changing the reservoir is a random
    # number with a geometric distribution. Sample it using random() and logs.
    next_index = k + floor(log(random()) / log(1 - W))

    for index, element in enumerate(iterable, k):
        if index == next_index:
            reservoir[randrange(k)] = element
            # The new W is the largest in a sample of k U(0, `old_W`) numbers
            W *= exp(log(random()) / k)
            next_index += floor(log(random()) / log(1 - W)) + 1

    return reservoir
def _sample_weighted(iterable, k, weights):
    """Weighted reservoir sampling: return up to *k* items chosen without
    replacement from *iterable*, with selection driven by the
    corresponding values from *weights*.

    Not deterministic; draws from the global ``random()``/``uniform()``.
    """
    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
    # "Weighted random sampling with a reservoir".

    # Log-transform for numerical stability for weights that are small/large
    weight_keys = (log(random()) / weight for weight in weights)

    # Fill up the reservoir (collection of samples) with the first `k`
    # weight-keys and elements, then heapify the list.
    reservoir = take(k, zip(weight_keys, iterable))
    heapify(reservoir)

    # The number of jumps before changing the reservoir is a random variable
    # with an exponential distribution. Sample it using random() and logs.
    smallest_weight_key, _ = reservoir[0]
    weights_to_skip = log(random()) / smallest_weight_key

    for weight, element in zip(weights, iterable):
        if weight >= weights_to_skip:
            # The notation here is consistent with the paper, but we store
            # the weight-keys in log-space for better numerical stability.
            smallest_weight_key, _ = reservoir[0]
            t_w = exp(weight * smallest_weight_key)
            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
            weight_key = log(r_2) / weight
            heapreplace(reservoir, (weight_key, element))
            smallest_weight_key, _ = reservoir[0]
            weights_to_skip = log(random()) / smallest_weight_key
        else:
            weights_to_skip -= weight

    # Equivalent to [element for weight_key, element in sorted(reservoir)]
    return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
    """Return a *k*-length list of elements chosen (without replacement)
    from the *iterable*.  Like :func:`random.sample`, but works on
    iterables of unknown length.

    >>> sample(range(100), 5)  # doctest: +SKIP
    [81, 60, 96, 16, 4]

    An iterable of *weights* (one per item) may also be given; items are
    then selected with probability driven by their weights:

    >>> weights = (i * i + 1 for i in range(100))
    >>> sample(range(100), 5, weights=weights)  # doctest: +SKIP
    [79, 67, 74, 66, 78]

    With ``k == len(data)`` this generates weighted random permutations:
    the relative weight of each item determines the probability that it
    appears late in the permutation.
    """
    if k == 0:
        return []

    stream = iter(iterable)
    if weights is None:
        return _sample_unweighted(stream, k)
    return _sample_weighted(stream, k, iter(weights))
| mit |
DavidBreuer/CytoSeg | CytoSeg/utils.py | 1 | 74513 | # -*- coding: utf-8 -*-
################################################################################
# Module: utils.py
# Description: Test imports and network extraction
# License: GPL3, see full license in LICENSE.txt
# Web: https://github.com/DavidBreuer/CytoSeg
################################################################################
#%%############################################################################# imports
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import random
import scipy as sp
import scipy.misc
import scipy.ndimage
import scipy.optimize
import scipy.spatial
import scipy.stats
import scipy.cluster
import skimage
import skimage.filters
import skimage.morphology
import skimage.feature
import skimage.segmentation
import shapely
import shapely.geometry
import sys
import xml
import xml.dom
import xml.dom.minidom
import utils
#%%############################################################################# help functions
def xmlread(name,threed=0):
    """Read Fiji-Trackmate xml file to Python list of lists.

    Parameters
    ----------
    name : name and directory of xml file
    threed : set to 1 for three-dimensional data

    Returns
    -------
    T : list of tracks; one array per track with a row per edge, each row
        holding the source and target spot properties
        [t0,x0,y0,z0,mi0,mt0,mq0,md0,t1,x1,y1,z1,mi1,mt1,mq1,md1],
        sorted by the source time t0
    """
    xmldoc=xml.dom.minidom.parse(name)
    spots=xmldoc.getElementsByTagName('Spot')
    tracs=xmldoc.getElementsByTagName('Track')
    S=[]                                                        # per-spot property lists
    N=[]                                                        # spot IDs, used for index lookup below
    for spot in spots:                                          # collect the properties of each spot
        n=int(spot.attributes['ID'].value)
        t=float(spot.attributes['POSITION_T'].value)
        x=float(spot.attributes['POSITION_X'].value)
        y=float(spot.attributes['POSITION_Y'].value)
        if(threed): z=float(spot.attributes['POSITION_Z'].value)
        else: z=0                                               # two-dimensional data: z coordinate is zero
        mi=float(spot.attributes['MEAN_INTENSITY'].value)
        mt=float(spot.attributes['TOTAL_INTENSITY'].value)
        mq=float(spot.attributes['QUALITY'].value)
        md=float(spot.attributes['ESTIMATED_DIAMETER'].value)
        N.append(n)
        S.append([n,t,x,y,z,mi,mt,mq,md])
    T=[]
    for trac in tracs:                                          # for each track...
        n=int(trac.attributes['TRACK_ID'].value)
        dur=int(float(trac.attributes['TRACK_DURATION'].value))     # NOTE(review): parsed but currently unused
        dis=float(trac.attributes['TRACK_DISPLACEMENT'].value)      # NOTE(review): parsed but currently unused
        edges=trac.getElementsByTagName('Edge')
        E=[]
        for edge in edges:                                      # for each edge (link between two spots)...
            id0=int(edge.attributes['SPOT_SOURCE_ID'].value)
            id1=float(edge.attributes['SPOT_TARGET_ID'].value)
            vel=float(edge.attributes['VELOCITY'].value)        # NOTE(review): parsed but currently unused
            n0=N.index(id0)                                     # look up source and target spots by ID
            n1=N.index(id1)
            m0,t0,x0,y0,z0,mi0,mt0,mq0,md0=S[n0]
            m1,t1,x1,y1,z1,mi1,mt1,mq1,md1=S[n1]
            E.append([t0,x0,y0,z0,mi0,mt0,mq0,md0,t1,x1,y1,z1,mi1,mt1,mq1,md1])
        E=np.array(E)
        if(len(E)>0):
            E=E[E[:,0].argsort()]                               # sort edges by source time
        T.append(E)
    return T
def angle360(dxy):
    """Compute angle of two-dimensional vector relative to y-axis in degrees.

    The result lies in [0, 360): a vector along +y maps to 0 and the
    angle increases clockwise (e.g. +x maps to 90).

    Parameters
    ----------
    dxy : two-dimensional vector (dx, dy)

    Returns
    -------
    angle : angle in degrees
    """
    dx, dy = dxy
    degrees = np.degrees(np.arctan2(-dx, -dy))
    return np.mod(degrees + 180.0, 360.0)
def im2d3d(im):
    """Convert two-dimensional array to three-dimensional array.

    A two-dimensional array gains a singleton third axis; arrays that
    already have three (or more) dimensions are returned unchanged.

    Parameters
    ----------
    im : array or image

    Returns
    -------
    im : array or image with at least three dimensions
    """
    if im.ndim == 2:
        im = im[:, :, np.newaxis]
    return im
def remove_duplicates(points):
    """Remove duplicate rows from a list of points.

    Parameters
    ----------
    points : list of points (rows)

    Returns
    -------
    pointz : array of unique rows, keeping first occurrences in order
    """
    frame = pd.DataFrame(points)
    return frame.drop_duplicates().values
def tube_filter(imO,sigma):
    """Apply tubeness filter to image.

    Enhances bright tube-like (filamentous) structures by negating the
    second Hessian eigenvalue at scale *sigma* and rescaling the result
    to [0, 255].

    Parameters
    ----------
    imO : original two-dimensional image
    sigma : width parameter of tube-like structures

    Returns
    -------
    imT : filtered and rescaled image
    """
    # NOTE(review): this uses the pre-0.15 scikit-image Hessian API (three
    # matrix components passed positionally to hessian_matrix_eigvals);
    # newer releases changed both signatures — verify before upgrading.
    imH=skimage.feature.hessian_matrix(imO,sigma=sigma,mode='reflect')
    imM=skimage.feature.hessian_matrix_eigvals(imH[0],imH[1],imH[2])
    imR=-1.0*imM[1]                                             # negate second eigenvalue so ridges become positive
    imT=255.0*(imR-imR.min())/(imR.max()-imR.min())             # rescale to [0, 255]
    return imT
def cell_sample(mask, R):
    """Sample random points uniformly across a masked area.

    Parameters
    ----------
    mask : binary array; nonzero entries mark the sampling area
    R : number of sampling points

    Returns
    -------
    coords : (R, 2) array of sampled points (pixel index plus a uniform
        sub-pixel offset in [0, 1))
    """
    wh = np.array(np.where(mask)).T
    W = len(wh)
    # FIX: np.random replaces the scipy.random.randint / scipy.rand
    # aliases, which were removed from modern SciPy releases; the
    # underlying distributions are identical.
    idx = np.random.randint(0, W, R)
    coords = wh[idx] + np.random.rand(R, 2)
    return coords
def multi_line_intersect(seg, segs):
    """Check which of several line segments intersect a given segment.

    Parameters
    ----------
    seg : single line segment, array of shape (2, 2)
    segs : multiple line segments, array of shape (N, 2, 2)

    Returns
    -------
    intersects : Boolean array marking proper intersections (strict sign
        test: collinear overlaps and shared endpoints do not count);
        ``[False]`` when *segs* is empty
    """
    intersects = np.array([False])
    if len(segs) > 0:
        d3 = segs[:, 1, :] - segs[:, 0, :]
        d1 = seg[1, :] - seg[0, :]
        c1x = np.cross(d3, seg[0, :] - segs[:, 0, :])
        c1y = np.cross(d3, seg[1, :] - segs[:, 0, :])
        c3x = np.cross(d1, segs[:, 0, :] - seg[0, :])
        c3y = np.cross(d1, segs[:, 1, :] - seg[0, :])
        # Two segments properly intersect iff each segment's endpoints lie
        # strictly on opposite sides of the other segment.
        # BUG FIX: the result was previously assigned to a variable named
        # `intersect` and never returned, so the function always reported
        # no intersections.
        intersects = np.logical_and(c1x * c1y < 0, c3x * c3y < 0)
    return intersects
def bounds(x, xmin, xmax):
    """Restrict a number to the interval [xmin, xmax].

    Parameters
    ----------
    x : number
    xmin : lower bound
    xmax : upper bound

    Returns
    -------
    x : bounded number
    """
    if x < xmin:
        return xmin
    if x > xmax:
        return xmax
    return x
def node_condense(imM,imG,ones):
    """Condense neighboring nodes to a single node at their center of mass.

    Parameters
    ----------
    imM : binary node array (0 = background; 1 = nodes)
    imG : gray-scale intensity image (weights the center of mass)
    ones : array defining neighborhood structure

    Returns
    -------
    imL : condensed and labeled node array (0 = background; 1-N = nodes)
    """
    imL,N=sp.ndimage.label(imM,structure=ones)                  # label nodes
    sizes=sp.ndimage.sum(imL>0,imL,range(1,N+1))                # compute size of nodes (clusters)
    coms=sp.ndimage.center_of_mass(imG,imL,range(1,N+1))        # compute center of mass of nodes (clusters)
    for n in range(N):                                          # for each node...
        if(sizes[n]>1):                                         # if cluster...
            idx=(imL==n+1)                                      # get cluster coordinates
            idm=tuple(np.add(coms[n],0.5).astype('int'))        # get center-of-mass coordinates (rounded to nearest pixel)
            imL[idx]=0                                          # remove node cluster
            imL[idm]=n+1                                        # set node at center of mass
            # NOTE(review): if a rounded center of mass lands inside another
            # cluster that is processed later, that cluster's removal erases
            # it — the result can depend on label order; confirm intended.
    imL,N=sp.ndimage.label(imL>0,structure=ones)                # relabel the condensed nodes
    imL=imL.astype('int')
    return imL
def node_find(im):
    """Classify the central pixel of a 3x3x3 image section as node or not.

    Parameters
    ----------
    im : flattened 3x3x3 section of a binary filament image

    Returns
    -------
    val : 1 if the central pixel is a node (filament endpoint or
        crossing), 0 otherwise
    """
    section = np.reshape(im, (3, 3, 3))
    if section[1, 1, 1] != 1:
        # Central pixel does not lie on a filament.
        return 0
    # Remove the central pixel and count the filament branches that remain:
    # one branch marks an endpoint, three or more mark a crossing, while
    # exactly two means the pixel merely lies along a filament.
    section[1, 1, 1] = 0
    _, branches = sp.ndimage.label(section)
    return 1 if branches != 0 and branches != 2 else 0
def connected_components(graph):
    """Compute connected-component sizes after pruning weak edges.

    Edges whose 'capa' attribute is at or below the 50th percentile are
    removed from a copy of the graph before the components are computed.

    Parameters
    ----------
    graph : original graph

    Returns
    -------
    ca : array of sizes of connected components
    """
    pruned = graph.copy()
    edges = pruned.edges(data=True)
    capacities = 1.0 * np.array([d['capa'] for u, v, d in edges])
    threshold = np.percentile(capacities, 50.0)
    # Drop every edge at or below the median capacity.
    for u, v, d in edges:
        if d['capa'] <= threshold:
            pruned.remove_edge(u, v)
    components = nx.connected_components(pruned)
    return np.array([len(c) for c in components])
def path_lengths(graph):
    """Compute shortest path lengths (by edge attribute 'lgth').

    Returns the lower triangle of the all-pairs distance matrix with
    zeros replaced by NaN.

    Parameters
    ----------
    graph : original graph

    Returns
    -------
    dist : array of shortest path lengths
    """
    # NOTE(review): assumes the NetworkX 1.x dict-of-dicts return value;
    # NetworkX 2.x returns an iterator of (node, dict) pairs instead.
    dists = nx.all_pairs_dijkstra_path_length(graph, weight='lgth')
    rows = [[v for v in row.values()] for row in dists.values()]
    dist = np.tril(np.array(rows))
    dist[dist == 0] = np.nan
    return dist
def edge_angles(graph, pos, mask):
    """Compute the distribution of angles between network edges and the
    cell axis.

    Parameters
    ----------
    graph : original graph
    pos : node positions
    mask : binary array of cellular region of interest

    Returns
    -------
    degs : list of angles (degrees, modulo 180) between edges and cell axis
    """
    # Orientation of the cell axis, derived from the mask.
    c0, c1, vc, vd, an, rot = utils.mask2rot(mask)
    return [
        np.mod(utils.angle360(1.0 * (pos[u] - pos[v])) + 360.0 - an, 180.0)
        for u, v, d in graph.edges(data=True)
    ]
def crossing_number(graph,pos):
    """Compute number of edge intersections per edge.

    Parameters
    ----------
    graph : original graph
    pos : node positions

    Returns
    -------
    cns : list of edge crossing numbers, one per edge (in edge-iteration order)
    """
    # NOTE(review): graph.edges_iter() is NetworkX 1.x API; NetworkX 2.x
    # replaced it with graph.edges() — confirm the pinned version.
    ee=np.array(graph.edges())                                  # edge node pairs, for the shared-node exclusion below
    edges=[]
    cns=[]
    for i,(n1,n2) in enumerate(graph.edges_iter()):             # for each edge...
        edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # append edge as line segment
        edges.append(edge)
    for i,(n1,n2) in enumerate(graph.edges_iter()):             # for each edge...
        idx=(ee[:,0]!=n1)*(ee[:,1]!=n1)*(ee[:,0]!=n2)*(ee[:,1]!=n2) # exclude edges that share a node with the selected edge
        idx[i]=False                                            # exclude selected edge itself
        edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # treat edge as line segment
        cross=utils.multi_line_intersect(np.array(edge),np.array(edges)[idx]) # check intersections of selected edge with remaining edges
        cns.append(cross.sum())                                 # append crossing number of selected edge
    return cns
#%%############################################################################# graph functions
def skeletonize_graph(imO,mask,sigma,block,small,factr):
    """Filter and skeletonize image of filament structures.

    Note: *imO* is rescaled in place (shifted and scaled to [0, 255]
    relative to the masked region) — pass a copy if the original data
    must be preserved.

    Parameters
    ----------
    imO : original image (three-dimensional, y-x-z)
    mask : binary array of cellular region of interest
    sigma : width of tubeness filter and filament structures
    block : block size of adaptive median filter
    small : size of smallest components
    factr : fraction of average intensity below which components are removed

    Returns
    -------
    imR : image after application of tubeness filter
    imA : filtered and skeletonized image
    """
    imO-=imO[mask].min()                                        # rescale intensities within mask to [0, 255]
    imO*=255.0/imO.max()
    ly,lx,lz=imO.shape
    imR=imO.copy()*0
    imT=imO.copy()*0
    for z in range(lz):                                         # tube-filter and threshold each z-slice separately
        imR[:,:,z]=tube_filter(imO[:,:,z],sigma)
        # NOTE(review): threshold_adaptive was deprecated and later removed
        # from scikit-image (replaced by threshold_local) — confirm version.
        imT[:,:,z]=skimage.filters.threshold_adaptive(imR[:,:,z],block)
    imS=skimage.morphology.skeletonize_3d(imT>0)                # reduce filaments to a one-pixel-wide skeleton
    ones=np.ones((3,3,3))
    imC=skimage.morphology.remove_small_objects(imS,small,connectivity=2)>0 # drop components smaller than `small`
    for z in range(lz):                                         # restrict skeleton to the masked region
        imC[:,:,z]=imC[:,:,z]*mask
    imC=imC>0
    imL,N=sp.ndimage.label(imC,structure=ones)                  # label the remaining components
    mean=imO[imC].mean()                                        # overall mean intensity along the skeleton
    means=[np.mean(imO[imL==n]) for n in range(1,N+1)]          # mean intensity of each component
    imA=1.0*imC.copy()
    for n in range(1,N+1):                                      # remove components dimmer than factr * overall mean
        if(means[n-1]<mean*factr):
            imA[imL==n]=0
    # NOTE(review): connectivity=8 looks like a 2D convention applied to a
    # 3D array here — confirm the intended neighborhood.
    imA=skimage.morphology.remove_small_objects(imA>0,2,connectivity=8)
    return imR,imA
def node_graph(imA,imG):
    """Construct image indicating background (=0), filaments (=1), and labeled nodes (>1).

    Note: *imA* is modified in place (node-free components are removed).

    Parameters
    ----------
    imA : skeletonized image of filament structures
    imG : Gaussian filtered image of filament structures

    Returns
    -------
    imE : image indicating background, filaments, and nodes
    """
    ones=np.ones((3,3,3))                                       # neighborhood structure of pixel
    imM=sp.ndimage.generic_filter(imA,utils.node_find,footprint=ones,mode='constant',cval=0) # find nodes as endpoints or crossings of filaments
    imN=utils.node_condense(imM,imG,ones)                       # condense neighboring nodes
    imL=skimage.segmentation.relabel_sequential(imN)[0]         # relabel nodes sequentially (1..N)
    imB,B=sp.ndimage.label(imA,structure=ones)                  # label components of skeletonized image
    for b in range(1,B+1):                                      # for each component...
        no=np.max((imB==b)*(imL>0))                             # check whether the component contains at least one node
        if(no==0):
            imA[imB==b]=0                                       # remove node-free component
    imE=1*((imA+imL)>0)+imL                                     # construct image indicating background (=0), filaments (=1), and labeled nodes (>1)
    return imE
def make_graph(imE,imG):
    """Construct network representation from image of filament structures.
    Parameters
    ----------
    imE : image indicating background (=0), filaments (=1), and labeled nodes (>1)
    imG : Gaussian filtered image of filament structures
    Returns
    -------
    graph : multigraph representation of the filament structures
    pos : node positions (x,y,z)
    """
    N=imE.max()-1 # number of nodes (node labels run from 2 to max)
    sq2=np.sqrt(2.0) # distance between diagonal pixels
    sq3=np.sqrt(3.0) # distance between room diagonal pixels
    diag=np.array([[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]],[[sq2,1,sq2],[1,0,1],[sq2,1,sq2]],[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]]]) # distance matrix of 3x3x3 neighborhood
    pos=np.array(np.where(imE>1)).T[:,::-1].astype('int') # node positions
    pos=pos[:,[1,2,0]] # change order of node positions (x,y,z)
    imY=imE.copy() # array used to propagate node labels along filaments
    imL=1.0*(imE.copy()>0) # accumulated filament length up to the current position
    imS=1.0*(imE.copy()>0) # accumulated filament intensity up to the current position
    ly,lx,lz=imE.shape # image dimensions
    ys=(imY==1).sum() # pixels that are filament (=1): neither background (=0) nor node (>1)
    while(ys>0): # while unlabeled filament remains...
        c=np.transpose(np.where(imY>1)) # positions of labeled pixels (>1)
        for y,x,z in c: # for each labeled pixel...
            xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # 3x3x3 neighborhood around the pixel, cropped at the image borders
            ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
            zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
            sec=imY[ymin:ymax,xmin:xmax,zmin:zmax] # neighborhood of the label array
            lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax] # neighborhood of the length array
            stg=imS[ymin:ymax,xmin:xmax,zmin:zmax] # neighborhood of the intensity array
            imY[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(sec==1,imY[y,x,z],sec) # propagate this pixel's label to neighboring filament pixels
            imL[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(lgt==1,diag[0:ymax-ymin,0:xmax-xmin,0:zmax-zmin]+imL[y,x,z],lgt) # extend length by 1/sqrt(2)/sqrt(3) for straight/diagonal/room-diagonal steps
            imS[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(stg==1,imG[y,x,z]+imS[y,x,z],stg) # extend accumulated intensity by the Gaussian-filtered image value
        ys=(imY==1).sum() # remaining amount of unlabeled filament
    graph=nx.empty_graph(N,nx.MultiGraph()) # create empty multigraph over the N nodes
    ys,xs,zs=np.where(imY>1) # all labeled filament pixels
    for y,x,z in zip(ys,xs,zs): # for each labeled filament pixel...
        xy=imY[y,x,z] # label of this pixel
        xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # 3x3x3 neighborhood around the pixel, cropped at the image borders
        ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
        zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
        sec=imY[ymin:ymax,xmin:xmax,zmin:zmax].flatten() # flattened neighborhood of the label array
        lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax].flatten() # flattened neighborhood of the length array
        stg=imS[ymin:ymax,xmin:xmax,zmin:zmax].flatten() # flattened neighborhood of the intensity array
        for idx,i in enumerate(sec): # for every pixel in the neighborhood...
            if(i!=xy and i>1): # two different labels meet: this is an edge between the two nodes
                u,v=np.sort([xy-2,i-2]) # order node ids (labels offset by 2) to avoid bidirectional duplicates (A->B and B->A)
                edist=sp.linalg.norm(pos[u]-pos[v]) # Euclidean distance between the corresponding nodes
                fdist=imL[y,x,z]+lgt[idx] # filament length = sum of the two partial lengths
                weight=imS[y,x,z]+stg[idx] # filament weight = sum of the two partial intensities
                weight=max(1e-9,weight) # enforce a positive edge weight
                capa=1.0*weight/fdist # edge capacity = weight per unit length
                lgth=1.0*fdist/weight # edge length = inverse capacity
                conn=0 # edge belongs to the original, non-connected network
                jump=0 # edge belongs to the original, non-periodic network
                graph.add_edge(u,v,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump) # add edge to network
    return graph,pos
def unify_graph(graph):
    """Project multigraph onto a simple graph.
    Parameters
    ----------
    graph : original multigraph
    Returns
    -------
    graphz : simple graph; parallel edges are merged (capacities summed,
             lengths minimized, multiplicity counted)
    """
    graphz=nx.empty_graph(graph.number_of_nodes()) # simple graph over the same node set
    for u,v,d in graph.edges(data=True): # walk every multiedge
        if graphz.has_edge(u,v): # parallel edge: merge into the existing one
            data=graphz[u][v]
            data['multi']+=1.0 # count multiplicity
            data['capa']+=d['capa'] # capacities add up in parallel
            data['lgth']=min(data['lgth'],d['lgth']) # keep the shortest length
        else: # first edge between u and v: copy its properties
            graphz.add_edge(u,v,edist=d['edist'],fdist=d['fdist'],weight=d['weight'],capa=d['capa'],lgth=d['lgth'],conn=d['conn'],jump=d['jump'],multi=1)
    return graphz
def connect_graph(graph,pos,imG):
    """Connect graph by adding edges of minimum edge length.
    Parameters
    ----------
    graph : original graph
    pos : node positions (x,y,z)
    imG : Gaussian filtered image of filament structures
    Returns
    -------
    graphz : connected graph
    """
    dists=sp.spatial.distance_matrix(pos,pos) # distance matrix between all node positions
    graphz=graph.copy() # work on a copy of the original graph
    N=graphz.number_of_nodes() # number of nodes
    comp=nx.connected_components(graphz) # connected components
    comp=sorted(comp,key=len)[::-1] # sort components by size, descending
    while len(comp)>1: # while the network is disconnected...
        compo=comp[0] # nodes of the largest component
        compl=list(compo)
        compi=list(set(range(N)).difference(compo)) # all remaining nodes
        dist=dists[compl][:,compi] # distances between largest component and the rest
        n0,ni=np.unravel_index(dist.argmin(),dist.shape) # closest pair of nodes across the gap
        p0,pi=pos[compl][n0],pos[compi][ni]
        edist=sp.linalg.norm(p0-pi) # Euclidean distance between the two nodes
        edist=max(1.0,edist) # enforce minimum distance
        fdist=1.0*np.ceil(edist) # approximate filament length by rounding up
        aa=np.array([p0[0],p0[1],pi[0],pi[1]]) # endpoints of the bridging line in the xy-plane
        yy,xx=skimage.draw.line(*aa.astype('int')) # rasterize the line; NOTE(review): result names suggest (yy,xx) but the inputs are x/y from pos -- confirm axis order matches the imG indexing below
        zz=(np.linspace(p0[2],pi[2],len(xx))).astype('int') # interpolate z-coordinates along the line
        weight=np.sum(imG[xx,yy,zz]) # edge weight = image intensity along the line
        weight=max(1e-9,weight) # enforce positive weight
        capa=1.0*weight/fdist # edge capacity = weight per unit length
        lgth=1.0*fdist/weight # edge length = inverse capacity
        conn=1 # edge was added to connect the network
        jump=0 # edge belongs to the original, non-periodic network
        multi=1 # edge multiplicity
        graphz.add_edge(compi[ni],compl[n0],edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # bridge the two components
        comp=nx.connected_components(graphz) # recompute connected components
        comp=sorted(comp,key=len)[::-1] # re-sort by size, descending
    return graphz
def randomize_graph(graph,pos,mask,planar=0,weights=0,iterations=1000):
    """Randomize graph by shuffling node positions and edges or edge capacities only.
    Parameters
    ----------
    graph : original graph
    pos : node positions
    mask : binary array of cellular region of interest
    planar : ignore edge crossings (=0) or favor planar graph by reducing number of edge crossings (=1)
    weights : shuffle only edge capacities (=0) or node positions and edges (=1)
    iterations : number of iterations before returning original graph
    Returns
    -------
    graphz : randomized graph
    poz : randomized node positions
    """
    if(weights==0): # shuffle edge properties only
        ec=np.array([d for u,v,d in graph.edges(data=True)]) # array of edge property dicts
        random.shuffle(ec) # shuffle the property dicts
        graphz=graph.copy() # copy graph
        for j,(u,v,d) in enumerate(graphz.edges(data=True)): # assign shuffled properties to the copied edges
            for k in d.keys():
                d[k]=ec[j][k]
        poz=pos # node positions unchanged
    else: # shuffle node positions and edges otherwise
        N=graph.number_of_nodes() # node number
        E=graph.number_of_edges() # edge number
        graphz=nx.empty_graph(N,nx.MultiGraph()) # create new, empty multigraph
        diste=np.array([d['edist'] for u,v,d in graph.edges(data=True)]) # Euclidean edge lengths
        bins=[0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,9999] # bin boundaries for edge lengths
        B=len(bins)-1 # number of bins
        dibse=np.zeros(E).astype('int') # bin number per edge
        for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])): # for each bin...
            ide=(diste>=b1)*(diste<b2) # edges with Euclidean lengths in this bin
            dibse[ide]=i # assign bin number
        eweight=np.array([d['weight'] for u,v,d in graph.edges(data=True)]) # edge weights
        ecapa=np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # edge capacities
        redo=1 # flag: no suitable node placement found yet
        iteration=0 # iteration counter
        while(redo==1 and iteration<iterations): # resample node positions until every edge can be accommodated
            iteration+=1 # increase iteration by one
            poz=utils.cell_sample(mask,N)[:,::-1].astype('int') # resample xy-components of node positions inside the mask
            zzz=pos[:,2] # keep original z-components
            poz=np.vstack([poz.T,zzz]).T # merge xyz-components of node positions
            dista=scipy.spatial.distance_matrix(poz,poz) # distance matrix of the new positions; NOTE(review): uses 'scipy' while the rest of the file uses the 'sp' alias -- confirm 'import scipy' exists at module level
            dibsa=np.zeros((N,N)).astype('int') # bin numbers for all potential edges
            for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])):
                ida=(dista>=b1)*(dista<b2)
                dibsa[ida]=i
            dibsa[np.tri(N)>0]=-9999 # exclude lower triangle: no loops (A->A), no bidirectional duplicates (A->B and B->A)
            redo=1*np.max([(dibsa==b).sum()<(dibse==b).sum() for b in range(B)]) # redo if any bin offers fewer node pairs than the original edges need
        if(iteration<iterations): # a suitable placement was found...
            isort=np.argsort(diste)[::-1] # process edges longest first
            diste=diste[isort]
            dibse=dibse[isort]
            eweight=eweight[isort]
            ecapa=ecapa[isort]
            edges=[] # line segments of the edges added so far
            for e in range(E): # for each original edge...
                candidates=np.where(dibsa==dibse[e]) # node pairs whose distance falls in the edge's length bin
                C=len(candidates[0]) # number of candidate pairs
                cromm=9999 # best (lowest) crossing count so far
                ii=random.sample(range(C),min(50,C)) # try up to 50 random candidate pairs; NOTE(review): if no candidate passes the test below, 'edgem'/'m1'/'m2' are stale or unbound -- confirm the redo check above excludes this
                for i in ii: # for each candidate pair...
                    n1=candidates[0][i] # get nodes
                    n2=candidates[1][i]
                    edge=np.array([[poz[n1][0],poz[n2][0]],[poz[n1][1],poz[n2][1]]]).T # xy line segment between the candidate nodes
                    cross=planar*utils.multi_line_intersect(np.array(edge),np.array(edges)).sum() # crossings with existing edges (always 0 if planar==0)
                    if(cross<cromm and dibsa[n1,n2]>=0): # candidate still allowed and fewer crossings than the best so far
                        cromm=cross # store crossing number
                        edgem=edge # store edge
                        m1,m2=n1,n2 # store nodes
                edges.append(edgem) # keep the chosen segment
                edist=dista[m1,m2] # Euclidean distance of the chosen pair
                fdist=1.0*np.ceil(edist) # approximate filament length by rounding up
                weight=eweight[e] # reuse original edge weight
                capa=ecapa[e] # reuse original edge capacity
                lgth=1.0*fdist/weight # edge length = inverse capacity
                conn=0 # edge belongs to randomized, non-connected network
                jump=0 # edge belongs to randomized, non-periodic network
                multi=1 # edge multiplicity
                graphz.add_edge(m1,m2,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to network
                dibsa[m1,m2]=-9999 # remove the pair from the allowed candidates
                dibsa[m2,m1]=-9999
        else:
            graphz,poz=graph,pos # give up: return original network and positions
    return graphz,poz
def centralize_graph(graph,epb='lgth',efb='capa',ndg='capa',nec='capa',npr='capa'):
    """Compute edge centralities and store them as edge properties.
    Parameters
    ----------
    graph : original graph
    epb : edge property used for computation of edge path betweenness
    efb : " flow betweenness
    ndg : " degree centrality
    nec : " eigenvector centrality
    npr : " page rank
    Returns
    -------
    graphz : graph with computed edge centralities ('epb','efb','ndg','nec','npr')
    """
    graphz=graph.copy() # copy graph
    edges=graphz.edges(data=True) # edge list with data; NOTE(review): the code iterates this object several times and relies on a stable order -- confirm the installed NetworkX version supports that
    ec=1.0*np.array([d['capa'] for u,v,d in edges]) # edge capacities
    ec/=ec.sum() # normalize edge capacities to unit sum
    el=1.0/ec # edge lengths as inverse normalized capacities
    for i,(u,v,d) in enumerate(edges): # write normalized capacities and lengths back
        d['capa']=ec[i]
        d['lgth']=el[i]
    epb=nx.edge_betweenness_centrality(graphz,weight=epb) # edge path betweenness (parameter name is rebound to the result dict)
    efb=nx.edge_current_flow_betweenness_centrality(graphz,weight=efb) # edge flow betweenness
    lineg=nx.line_graph(graphz) # line graph: edges of graphz become nodes
    degree=graphz.degree(weight=ndg) # capacity-weighted node degree of the original graph
    for u,v,d in lineg.edges(data=True): # weight each line-graph edge with the degree of the shared original node
        n=list(set(u).intersection(v))[0] # the node the two original edges have in common
        d[ndg]=degree[n]
    nec=nx.eigenvector_centrality_numpy(lineg,weight=ndg) # edge eigenvector centrality (keyed by original edge tuple)
    npr=nx.pagerank(lineg,weight=ndg) # edge page rank
    ndg=lineg.degree(weight=ndg) # edge degree centrality
    for i,(u,v,d) in enumerate(edges): # store centralities on the original edges
        e=(u,v) # centrality dicts may key the edge in either orientation, hence the fallbacks below
        if(e in epb.keys()):
            d['epb']=epb[e]
        else:
            d['epb']=epb[e[::-1]]
        if(e in efb.keys()):
            d['efb']=efb[e]
        else:
            d['efb']=efb[e[::-1]]
        if(e in ndg.keys()):
            d['ndg']=ndg[e]
        else:
            d['ndg']=ndg[e[::-1]]
        if(e in nec.keys()):
            d['nec']=nec[e]
        else:
            d['nec']=nec[e[::-1]]
        if(e in npr.keys()):
            d['npr']=npr[e]
        else:
            d['npr']=npr[e[::-1]]
    return graphz
def normalize_graph(graph):
    """Normalize edge properties to unit sum.
    Parameters
    ----------
    graph : original graph
    Returns
    -------
    graph : graph with normalized edge properties
    """
    data=[d for u,v,d in graph.edges(data=True)] # edge property dictionaries (mutated in place)
    capa=np.array([d['capa'] for d in data],dtype=float)
    capa=capa/capa.sum() # capacities sum to one
    lgth=1.0/capa # lengths are inverse normalized capacities...
    lgth=lgth/lgth.sum() # ...renormalized to unit sum
    for i,d in enumerate(data):
        d['capa']=capa[i]
        d['lgth']=lgth[i]
    for key in ('epb','efb','ndg','nec','npr'): # normalize each centrality to unit sum
        vals=np.array([d[key] for d in data],dtype=float)
        vals=vals/vals.sum()
        for i,d in enumerate(data):
            d[key]=vals[i]
    return graph
def boundary_graph(jnet,graph,pos,SP,SL,JV,JH,imG,dthres=10.0,jthres=2.5):
    """Generate graph with periodic boundary conditions.
    Parameters
    ----------
    jnet : jump network
    graph : original graph
    pos : node positions
    SP : shortest paths
    SL : shortest path lengths
    JV : number of vertical jumps along shortest path
    JH : number of horizontal jumps along shortest path
    imG : Gaussian filtered image of filament structures
    dthres : maximum shortest-path length for adding a periodic edge
    jthres : maximum total number of jumps along the path
    Returns
    -------
    graphz : graph with periodic boundary conditions
    """
    B=jnet.number_of_nodes() # number of nodes of the jump network; graph nodes follow after these
    C=np.tril((SL<dthres)*((JV+JH)>0)*((JV+JH)<jthres))[B:,B:] # node pairs less than dthres apart whose path uses at least one but fewer than jthres jumps; tril avoids duplicate pairs
    wh=np.array(np.where(C)).T
    graphz=nx.MultiGraph(graph.copy()) # build the result as a multigraph copy of the original
    for idx,(w1,w2) in enumerate(wh): # for each pair of nodes, i.e., each potential periodic edge...
        path=SP[B+w1][B+w2] # shortest path between the selected nodes (indices offset by the B border nodes)
        pairs=zip(path[0:],path[1:]) # consecutive node pairs along the path (single-pass iterator, consumed once below)
        weight=0.0
        for n0,n1 in pairs: # for each edge along the path...
            if(jnet[n0][n1]['jump']==0): # only non-jump segments contribute image intensity
                rr,cc=skimage.draw.line(pos[n0][1],pos[n0][0],pos[n1][1],pos[n1][0]) # rasterize the segment; NOTE(review): assumes pos is indexable by jump-network node ids -- confirm against caller
                weight+=imG[cc,rr].sum() # add intensity along the line; NOTE(review): 2D indexing, unlike the 3D indexing used elsewhere -- confirm imG dimensionality here
        edist=SL[B+w1,B+w2] # Euclidean edge length = shortest path length
        edist=max(1.0,edist) # enforce minimum length
        fdist=1.0*np.ceil(edist) # approximate filament arc length
        weight=max(1e-9,weight) # enforce positive weight
        capa=1.0*weight/fdist # edge capacity = weight per unit length
        lgth=1.0*fdist/weight # edge length = inverse capacity
        conn=0 # edge belongs to the non-connected network
        jump=1 # edge belongs to the periodic network
        multi=1 # edge multiplicity
        graphz.add_edge(w2,w1,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add the periodic edge
    return graphz
def compute_graph(graph,pos,mask):
    """Compute scalar summary properties of a graph.
    Parameters
    ----------
    graph : original graph
    pos : node positions
    mask : binary array of cellular region of interest
    Returns
    -------
    quanta : list of graph properties, in this order:
        [0] number of nodes
        [1] number of edges
        [2] number of connected components
        [3] avg. edge capacity ('bundling')
        [4] assortativity ('heterogeneity')
        [5] avg. path length ('reachability')
        [6] CV of path lengths ('dispersal')
        [7] algebraic connectivity ('robustness')
        [8] CV of edge angles ('contortion')
        [9] avg. crossing number
    """
    N=graph.number_of_nodes() # number of nodes
    E=graph.number_of_edges() # number of edges
    ca=utils.connected_components(graph) # sizes of connected components
    C=len(ca) # number of connected components
    ec=1.0*np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # edge capacities
    bund=np.nanmean(ec) # average edge capacity ('bundling')
    assort=nx.degree_pearson_correlation_coefficient(graph,weight='capa') # assortativity ('heterogeneity')
    dist=utils.path_lengths(graph) # shortest path lengths
    distMU=np.nanmean(dist) # average path length ('reachability')
    distSD=np.nanstd(dist) # standard deviation of path lengths
    distCV=1.0*distSD/distMU # coefficient of variation of path lengths ('dispersal')
    ac=np.sort(nx.laplacian_spectrum(graph,weight='capa'))[1] # algebraic connectivity = second-smallest Laplacian eigenvalue ('robustness')
    degs=utils.edge_angles(graph,pos[:,:2],mask) # edge angles relative to cell axis
    angleMU=np.nanmean(degs) # average angle
    angleSD=np.nanstd(degs) # standard deviation of angles
    angleCV=1.0*angleSD/angleMU # coefficient of variation of angles ('contortion')
    cns=utils.crossing_number(graph,pos[:,:2]) # edge crossings per edge
    crossing=np.nanmean(cns) # average crossing number
    # fixed: the previous version also built a parallel 'quants' list of
    # property names that was never used or returned; the names now live
    # in the docstring instead.
    quanta=[N,E,C,bund,assort,distMU,distCV,ac,angleCV,crossing]
    return quanta
#%%############################################################################# periodic functions
def mask2rot(mask):
    """Compute main axis of cellular region of interest.
    Parameters
    ----------
    mask : binary array of cellular region of interest
    Returns
    -------
    c0,c1 : coordinates along cell axis
    vc,vd : center point and direction vector of cell axis
    angle : angle between y-axis and main cell axis
    rot : rotation matrix
    """
    skeleton=skimage.morphology.skeletonize(mask) # reduce the mask to its center line
    coords=np.array(np.where(skeleton>0)).T[:,::-1] # skeleton coordinates, axis order reversed
    fifth=int(len(coords)*0.2)
    c0,c1=coords[fifth],coords[-fifth] # points 20% and 80% along the cell axis
    vc=coords[int(len(coords)*0.5)] # axis midpoint
    vd=c0-c1 # axis direction vector
    angle=utils.angle360(vd) # axis angle in degrees
    radians=angle*np.pi/180.0 # converted to radians
    rot=np.array([[np.cos(radians),-np.sin(radians)],[np.sin(radians),np.cos(radians)]]) # 2D rotation matrix for that angle
    return c0,c1,vc,vd,angle,rot
def mask2poly(mask):
    """Convert cellular region of interest to polygon.
    Parameters
    ----------
    mask : binary array of cellular region of interest
    Returns
    -------
    polya : original polygon
    polyn : rotated polygon aligned with y-axis
    """
    shrunk=sp.ndimage.minimum_filter(mask,3,mode='constant',cval=0) # erode the mask to stay clear of the border
    contour=skimage.measure.find_contours(shrunk,0)[0] # outer contour of the eroded mask
    contour=skimage.measure.approximate_polygon(contour,tolerance=0.0) # polygonal approximation of the contour
    poly=1.0*remove_duplicates(contour) # drop duplicate vertices, cast to float
    rot=mask2rot(shrunk)[5] # rotation matrix of the main cell axis
    poly_rot=np.dot(poly,rot) # rotate the polygon onto the y-axis
    return poly[:,::-1],poly_rot[:,::-1]
def pbc_jnet_border(polyn):
    """Compute border of jump network.
    Parameters
    ----------
    polyn : rotated polygon of cellular region of interest
    Returns
    -------
    graph : border of jump network
    """
    polyi=1.0*polyn.astype('int') # truncate coordinates to integers (kept as floats)
    polys=shapely.geometry.Polygon(polyi) # shapely polygon for containment tests
    B=len(polyi) # number of polygon points
    graph=nx.empty_graph(B) # new, empty graph with one node per polygon point
    for i in range(2): # for both coordinate components...
        bx=polyi[:,i] # this coordinate of every point
        for idx,x in enumerate(set(bx)): # for each distinct coordinate value
            yy=np.sort(np.where(x==bx)[0]) # all points sharing that coordinate value
            Y=len(yy)
            for y in range(Y-1): # for each consecutive pair of such points
                y1,y2=yy[y],yy[y+1]
                line=shapely.geometry.LineString([polyi[y1],polyi[y2]]) # line between the two selected points
                if(line.within(polys)): # only add the jump if the line stays fully inside the polygon
                    graph.add_edge(y1,y2,weight=0.0,jump=0.001**i) # zero-weight jump edge; jump code is 0.001**i (1.0 for i=0, 0.001 for i=1) and is decoded via integer/fractional parts downstream
    distb=sp.spatial.distance_matrix(polyn,polyn) # distances between polygon points
    for b1 in range(B): # walk the polygon outline
        b2=np.mod(b1+1,B) # wrap around at the last point
        graph.add_edge(b1,b2,weight=distb[b1,b2],jump=0.0) # non-jump edge to the neighboring point
    return graph
def pbc_jnet_interior(pos,polya,jborder,cthres=10.0):
    """Compute interior of jump network.
    Parameters
    ----------
    pos : node positions
    polya : original polygon of cellular region of interest
    jborder : border of jump network
    cthres : maximum edge length between nodes of original network and border of jump network
    Returns
    -------
    jnet : complete jump network
    SP : array of shortest paths
    SL : array of shortest path lengths
    JV : get number of vertical jumps
    JH : get number of horizontal jumps
    """
    jnet=jborder.copy() # start from the border of the jump network
    B=jnet.number_of_nodes() # number of border nodes; graph nodes are appended after these
    distn=sp.spatial.distance_matrix(pos,polya) # distances between node positions and polygon points
    for n in range(len(pos)): # for each node of the original network...
        jnet.add_node(B+n) # add it to the jump network (index offset by B)
        for e in np.where(distn[n]<cthres)[0]: # connect it to every border point closer than cthres
            jnet.add_edge(B+n,e,weight=distn[n,e],jump=0.0)
    for n in range(len(pos)): # second pass over the nodes...
        if(jnet.degree(B+n)==0): # a dummy edge keeps the network connected if a node found no border point
            jnet.add_edge(B+n,0,weight=9999.0,jump=0.0)
    SP=utils.all_pairs_dijkstra_path(jnet,weight='weight',jump='jump') # all shortest paths in the jump network
    SX=utils.all_pairs_dijkstra_path_length(jnet,weight='weight',jump='jump') # all shortest path lengths and jump codes
    SL=1.0*np.array([[d1 for d1 in d2[0].values()] for d2 in SX.values()]) # shortest path lengths; NOTE(review): relies on a consistent dict ordering across rows -- confirm
    SJ=1.0*np.array([[d1 for d1 in d2[1].values()] for d2 in SX.values()]) # accumulated jump codes
    JV=np.floor(SJ+0.5) # rounded integer part of the code: number of vertical jumps
    JH=np.floor(np.mod(SJ,1.0)*1000.0+0.5) # fractional part in units of 0.001: number of horizontal jumps
    return jnet,SP,SL,JV,JH
#%%############################################################################# NetworkX: shortest path algorithms for weighed graphs
# -*- coding: utf-8 -*-
#"""
#Shortest path algorithms for weighed graphs.
#"""
#__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>',
# 'Loïc Séguin-C. <loicseguin@gmail.com>',
# 'Dan Schult <dschult@colgate.edu>'])
## Copyright (C) 2004-2011 by
## Aric Hagberg <hagberg@lanl.gov>
## Dan Schult <dschult@colgate.edu>
## Pieter Swart <swart@lanl.gov>
## All rights reserved.
## BSD license.
#
#__all__ = ['dijkstra_path',
# 'dijkstra_path_length',
# 'bidirectional_dijkstra',
# 'single_source_dijkstra',
# 'single_source_dijkstra_path',
# 'single_source_dijkstra_path_length',
# 'all_pairs_dijkstra_path',
# 'all_pairs_dijkstra_path_length',
# 'dijkstra_predecessor_and_distance',
# 'bellman_ford','negative_edge_cycle']
import heapq
import networkx as nx
from networkx.utils import generate_unique_node
def dijkstra_path(G, source, target, weight='weight',jump= 'jump'):
    """Return the shortest path from ``source`` to ``target`` in ``G``.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node.
    target : node
        Ending node.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    jump : string, optional (default='jump')
        Edge data key for the jump attribute, forwarded to
        ``single_source_dijkstra``.

    Returns
    -------
    path : list
        Nodes of a shortest path.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical; distances are sums of the
    weights of traversed edges.

    See Also
    --------
    bidirectional_dijkstra()
    """
    distances,paths=single_source_dijkstra(G,source,target=target,weight=weight,jump=jump)
    if target in paths:
        return paths[target]
    raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
def dijkstra_path_length(G, source, target, weight='weight',jump= 'jump'):
    """Returns the shortest path length from source to target
    in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        starting node for path
    target : node label
        ending node for path
    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight
    jump: string, optional (default='jump')
        Edge data key corresponding to the jump attribute

    Returns
    -------
    length : number
        Shortest path length.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    bidirectional_dijkstra()
    """
    # BUG FIX: the customized single_source_dijkstra_path_length in this
    # module returns a (distance, jump) tuple, not a plain dict; indexing
    # that tuple with a node label returned the wrong object (or raised
    # IndexError, which the KeyError handler below would not catch).
    length,jumps=single_source_dijkstra_path_length(G, source, weight=weight,jump= jump)
    try:
        return length[target]
    except KeyError:
        raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
def single_source_dijkstra_path(G,source, cutoff=None, weight='weight',jump= 'jump'):
    """Compute shortest path between source and all other reachable
    nodes for a weighted graph.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node for path.
    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight
    jump: string, optional (default='jump')
        Edge data key corresponding to the jump attribute
    cutoff : integer or float, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    paths : dictionary
        Dictionary of shortest paths keyed by target.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra()
    """
    # BUG FIX: cutoff was accepted and documented but silently ignored;
    # forward it so the "only paths of length <= cutoff" contract holds.
    (length,path)=single_source_dijkstra(G,source,cutoff=cutoff,weight=weight,jump=jump)
    return path
def single_source_dijkstra_path_length(G, source, cutoff= None,
                                       weight= 'weight',jump= 'jump'):
    """Compute the shortest path length between source and all other
    reachable nodes for a weighted graph, tracking accumulated jumps.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path
    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    jump: string, optional (default='jump')
        Edge data key of the jump code accumulated along the path.
    cutoff : integer or float, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    (dist, jumq) : pair of dictionaries
        ``dist`` maps each reached node to its shortest path length;
        ``jumq`` maps it to the jump code accumulated along that path.
        NOTE: unlike the standard NetworkX function of the same name,
        this customized version returns a tuple, not a single dict.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    Paths accumulating more than one jump in either encoded direction
    are penalized with a large dummy distance (9999.0).

    See Also
    --------
    single_source_dijkstra()
    """
    dist = {} # dictionary of final distances
    jumq={} # dictionary of accumulated jump codes
    seen = {source:0}
    fringe=[] # use heapq with (distance,label,jump) tuples
    heapq.heappush(fringe,(0,source,0))
    while fringe:
        (d,v,j)=heapq.heappop(fringe)
        if v in dist:
            continue # already searched this node.
        dist[v] = d
        jumq[v] = j#jumq[v]+vw_jumq
        #for ignore,w,edgedata in G.edges_iter(v,data=True):
        #is about 30% slower than the following
        if G.is_multigraph():
            # collapse parallel edges to the minimum weight per neighbor;
            # NOTE(review): the collapsed data keeps only the weight key, so
            # edgedata.get(jump,1) below falls back to 1 for multigraphs --
            # confirm that is intended
            edata=[]
            for w,keydata in G[v].items():
                minweight=min((dd.get(weight,1)
                               for k,dd in keydata.items()))
                edata.append((w,{weight:minweight}))
        else:
            edata=iter(G[v].items())
        for w,edgedata in edata:
            vw_jumq = jumq[v] + edgedata.get(jump,1) # jump code accumulated when going via w
            ddist=edgedata.get(weight,1)
            vw_dist = dist[v] + ddist
            if(vw_dist<9999.0): # only re-check paths not already marked as dummies
                if(int(vw_jumq)>1 or int(vw_jumq%1.0*1000.0+0.5)>1): # more than one jump in either encoded direction
                    ddist=9999.0 # penalize with dummy distance
                    vw_dist = dist[v] + ddist
            if cutoff is not None:
                if vw_dist>cutoff:
                    continue
            if w in dist:
                if vw_dist < dist[w]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif w not in seen or vw_dist < seen[w]:
                seen[w] = vw_dist
                heapq.heappush(fringe,(vw_dist,w,vw_jumq))
    return dist,jumq
def single_source_dijkstra(G,source,target=None,cutoff=None,weight='weight',jump='jump'):
    """Compute shortest paths and lengths in a weighted graph G.

    Uses Dijkstra's algorithm.

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path.
    target : node label, optional
        Ending node; the search stops once it is settled.
    cutoff : integer or float, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.
    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.
    jump : string, optional (default='jump')
        Accepted for interface compatibility; not used by this function.

    Returns
    -------
    distance,path : dictionaries
        Tuple of two dictionaries keyed by node: distance from the source,
        and the path from the source to that node.

    Notes
    -----
    Edge weight attributes must be numerical; distances are sums of the
    weights of traversed edges. Not guaranteed to work with negative
    weights (a ValueError is raised when one is detected).

    See Also
    --------
    single_source_dijkstra_path()
    single_source_dijkstra_path_length()
    """
    if source==target:
        return ({source:0}, {source:[source]})
    dist={} # settled nodes -> final distance
    paths={source:[source]} # node -> shortest path from source
    seen={source:0} # best tentative distance per node
    fringe=[(0,source)] # heap of (distance,node) candidates
    while fringe:
        d,v=heapq.heappop(fringe)
        if v in dist:
            continue # stale heap entry; v already settled
        dist[v]=d
        if v==target:
            break # target settled: stop early
        if G.is_multigraph():
            # collapse parallel edges to the minimum weight per neighbor
            neighbors=[(w,{weight:min(dd.get(weight,1) for dd in keydata.values())})
                       for w,keydata in G[v].items()]
        else:
            neighbors=G[v].items()
        for w,edgedata in neighbors:
            candidate=d+edgedata.get(weight,1)
            if cutoff is not None and candidate>cutoff:
                continue # beyond the search depth
            if w in dist:
                if candidate<dist[w]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif w not in seen or candidate<seen[w]:
                seen[w]=candidate # strictly better path found
                heapq.heappush(fringe,(candidate,w))
                paths[w]=paths[v]+[w]
    return (dist,paths)
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight='weight'):
    """Compute shortest path lengths and the predecessors along shortest
    paths in a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
       Starting node for path

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    cutoff : integer or float, optional
       Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    pred, distance : dictionaries
       Two dictionaries keyed by node: the predecessors of each node on
       shortest paths from the source, and the distance to each node.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    A node's predecessor list has more than one element only when several
    distinct shortest paths from the source reach it.
    """
    final_dist = {}            # settled shortest distances
    preds = {source: []}       # predecessor lists on shortest paths
    tentative = {source: 0}    # best distance found so far per node
    heap = [(0, source)]       # (distance, node) priority queue
    while heap:
        node_dist, node = heapq.heappop(heap)
        if node in final_dist:
            continue           # stale heap entry: node already settled
        final_dist[node] = node_dist
        # Collapse parallel edges of a multigraph down to the cheapest one;
        # a missing weight attribute counts as 1.
        if G.is_multigraph():
            neighbors = [(nbr, {weight: min(dd.get(weight, 1)
                                            for dd in keydata.values())})
                         for nbr, keydata in G[node].items()]
        else:
            neighbors = iter(G[node].items())
        for nbr, edgedata in neighbors:
            cand = final_dist[node] + edgedata.get(weight, 1)
            if cutoff is not None and cand > cutoff:
                continue
            if nbr in final_dist:
                if cand < final_dist[nbr]:
                    raise ValueError('Contradictory paths found:',
                                     'negative weights?')
            elif nbr not in tentative or cand < tentative[nbr]:
                # Strictly better path: reset the predecessor list.
                tentative[nbr] = cand
                heapq.heappush(heap, (cand, nbr))
                preds[nbr] = [node]
            elif cand == tentative[nbr]:
                # Equally short path: record an additional predecessor.
                preds[nbr].append(node)
    return (preds, final_dist)
def all_pairs_dijkstra_path_length(G, cutoff=None, weight='weight', jump='jump'):
    """Compute shortest path lengths between all node pairs of a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    cutoff : integer or float, optional
       Depth to stop the search. Only paths of length <= cutoff are returned.

    jump : string, optional (default='jump')
       Forwarded unchanged to ``single_source_dijkstra_path_length``.

    Returns
    -------
    distance : dictionary
       Dictionary, keyed by source and target, of shortest path lengths.

    Examples
    --------
    >>> G=nx.path_graph(5)
    >>> length=nx.all_pairs_dijkstra_path_length(G)
    >>> print(length[1][4])
    3
    >>> length[1]
    {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionary returned only has keys for reachable node pairs.
    """
    # One single-source pass per node; unreachable pairs simply never
    # appear in the inner dictionaries.
    return {node: single_source_dijkstra_path_length(G, node, cutoff=cutoff,
                                                     weight=weight, jump=jump)
            for node in G}
def all_pairs_dijkstra_path(G, cutoff=None, weight='weight', jump='jump'):
    """Compute shortest paths between all node pairs of a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    cutoff : integer or float, optional
       Depth to stop the search. Only paths of length <= cutoff are returned.

    jump : string, optional (default='jump')
       Forwarded unchanged to ``single_source_dijkstra_path``.

    Returns
    -------
    distance : dictionary
       Dictionary, keyed by source and target, of shortest paths.

    Examples
    --------
    >>> G=nx.path_graph(5)
    >>> path=nx.all_pairs_dijkstra_path(G)
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    floyd_warshall()
    """
    # One single-source pass per node.
    return {node: single_source_dijkstra_path(G, node, cutoff=cutoff,
                                              weight=weight, jump=jump)
            for node in G}
def bellman_ford(G, source, weight='weight'):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of O(mn) where n is the number of
    nodes and m is the number of edges.  It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
       The algorithm works for all types of graphs, including directed
       graphs and multigraphs.

    source: node label
       Starting node for path

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    Returns
    -------
    pred, dist : dictionaries
       Two dictionaries keyed by node, giving the predecessor on the
       shortest path and the distance from the source respectively.

    Raises
    ------
    NetworkXUnbounded
       If the (di)graph contains a negative cost (di)cycle, the
       algorithm raises an exception to indicate its presence.  Note:
       any negative weight edge in an undirected graph is a negative
       cost cycle.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.path_graph(5, create_using = nx.DiGraph())
    >>> pred, dist = nx.bellman_ford(G, 0)
    >>> pred
    {0: None, 1: 0, 2: 1, 3: 2, 4: 3}
    >>> dist
    {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionaries returned only have keys for nodes reachable from
    the source.  A negative cycle in a component that does not contain
    the source will not be detected.
    """
    if source not in G:
        raise KeyError("Node %s is not found in the graph" % source)
    n = len(G)
    dist = {source: 0}
    pred = {source: None}
    if n == 1:
        return pred, dist

    # Collapse the parallel edges of a multigraph to their minimum weight;
    # a missing weight attribute counts as 1.
    if G.is_multigraph():
        def edge_weight(edge_dict):
            return min(eattr.get(weight, 1) for eattr in edge_dict.values())
    else:
        def edge_weight(edge_dict):
            return edge_dict.get(weight, 1)

    for _round in range(n):
        relaxed = False
        # Only edges leaving already-reached nodes can relax anything;
        # every other node is still at distance infinity.
        for u, dist_u in list(dist.items()):
            for v, edict in G[u].items():  # double loop handles undirected too
                dist_v = dist_u + edge_weight(edict)
                if v not in dist or dist[v] > dist_v:
                    dist[v] = dist_v
                    pred[v] = u
                    relaxed = True
        if not relaxed:
            break
    else:
        # Still relaxing after n full passes: a negative cycle is reachable.
        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    return pred, dist
def negative_edge_cycle(G, weight='weight'):
    """Return True if there exists a negative edge cycle anywhere in G.

    Parameters
    ----------
    G : NetworkX graph

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    Returns
    -------
    negative_cycle : bool
       True if a negative edge cycle exists, otherwise False.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
    >>> print(nx.negative_edge_cycle(G))
    False
    >>> G[1][2]['weight'] = -7
    >>> print(nx.negative_edge_cycle(G))
    True

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    This algorithm uses bellman_ford() but finds negative cycles
    on any component by first adding a new node connected to
    every node, and starting bellman_ford on that node.  It then
    removes that extra node, so G is left unmodified on return.
    """
    newnode = generate_unique_node()
    # Materialize the node list before mutating G (adding edges while
    # iterating over G's nodes would be incorrect).
    G.add_edges_from([(newnode, n) for n in G])
    try:
        bellman_ford(G, newnode, weight)
    except nx.NetworkXUnbounded:
        return True
    finally:
        # Restore G unconditionally.  The original code only removed the
        # temporary node on the success and NetworkXUnbounded paths, so any
        # other exception from bellman_ford leaked newnode into G.
        G.remove_node(newnode)
    return False
def bidirectional_dijkstra(G, source, target, weight = 'weight'):
    """Dijkstra's algorithm for shortest paths using bidirectional search.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node.

    target : node
       Ending node.

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    Returns
    -------
    length, path : number and list
       A (length, path) pair: the shortest path length and the list of
       nodes on a shortest path from source to target.

    Raises
    ------
    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G=nx.path_graph(5)
    >>> length,path=nx.bidirectional_dijkstra(G,0,4)
    >>> print(length)
    4
    >>> print(path)
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    In practice bidirectional Dijkstra is much more than twice as fast as
    ordinary Dijkstra.

    Ordinary Dijkstra expands nodes in a sphere-like manner from the
    source. The radius of this sphere will eventually be the length
    of the shortest path. Bidirectional Dijkstra will expand nodes
    from both the source and the target, making two spheres of half
    this radius. Volume of the first sphere is pi*r*r while the
    others are 2*pi*r/2*r/2, making up half the volume.

    This algorithm is not guaranteed to work if edge weights
    are negative or are floating point numbers
    (overflows and roundoff errors can cause problems).

    See Also
    --------
    shortest_path
    shortest_path_length
    """
    if source == target: return (0, [source])
    # All per-direction state is paired: index 0 = forward search from
    # source, index 1 = backward search from target.
    dists = [{}, {}]# dictionary of final (settled) distances, per direction
    paths = [{source:[source]}, {target:[target]}] # dictionary of paths, per direction
    fringe = [[], []] #heap of (distance, node) tuples for extracting next node to expand
    seen = [{source:0}, {target:0} ]#dictionary of tentative distances to nodes seen
    #initialize fringe heap
    heapq.heappush(fringe[0], (0, source))
    heapq.heappush(fringe[1], (0, target))
    #neighs for extracting correct neighbor information
    # NOTE(review): successors_iter/neighbors_iter look like the NetworkX 1.x
    # graph API — confirm against the networkx version this file targets.
    if G.is_directed():
        neighs = [G.successors_iter, G.predecessors_iter]
    else:
        neighs = [G.neighbors_iter, G.neighbors_iter]
    #variables to hold shortest discovered path
    #finaldist = 1e30000
    finalpath = []
    dir = 1
    while fringe[0] and fringe[1]:
        # choose direction: alternate between the two searches
        # dir == 0 is forward direction and dir == 1 is back
        dir = 1-dir
        # extract closest to expand
        (dist, v )= heapq.heappop(fringe[dir])
        if v in dists[dir]:
            # Shortest path to v has already been found
            continue
        # update distance
        dists[dir][v] = dist #equal to seen[dir][v]
        if v in dists[1-dir]:
            # if we have scanned v in both directions we are done
            # we have now discovered the shortest path
            # (finaldist/finalpath were necessarily set below when the two
            # searches first met at some common node)
            return (finaldist,finalpath)
        for w in neighs[dir](v):
            if(dir==0): #forward
                # edge weight v->w; parallel edges of a multigraph collapse
                # to their minimum weight, missing attributes count as 1
                if G.is_multigraph():
                    minweight=min((dd.get(weight,1)
                                   for k,dd in G[v][w].items()))
                else:
                    minweight=G[v][w].get(weight,1)
                vwLength = dists[dir][v] + minweight #G[v][w].get(weight,1)
            else: #back, must remember to change v,w->w,v
                if G.is_multigraph():
                    minweight=min((dd.get(weight,1)
                                   for k,dd in G[w][v].items()))
                else:
                    minweight=G[w][v].get(weight,1)
                vwLength = dists[dir][v] + minweight #G[w][v].get(weight,1)
            if w in dists[dir]:
                if vwLength < dists[dir][w]:
                    raise ValueError("Contradictory paths found: negative weights?")
            elif w not in seen[dir] or vwLength < seen[dir][w]:
                # relaxing: better tentative distance for w in this direction
                seen[dir][w] = vwLength
                heapq.heappush(fringe[dir], (vwLength,w))
                paths[dir][w] = paths[dir][v]+[w]
                if w in seen[0] and w in seen[1]:
                    #see if this path is better than the already
                    #discovered shortest path through this meeting node
                    totaldist = seen[0][w] + seen[1][w]
                    if finalpath == [] or finaldist > totaldist:
                        finaldist = totaldist
                        # stitch forward path + reversed backward path,
                        # dropping the duplicated meeting node w
                        revpath = paths[1][w][:]
                        revpath.reverse()
                        finalpath = paths[0][w] + revpath[1:]
    raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
| gpl-3.0 |
cl4rke/scikit-learn | sklearn/externals/joblib/parallel.py | 36 | 34375 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
# Backends accepted by Parallel(backend=...).
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
    """Wrap a sequence of (func, args, kwargs) tuples as a single callable.

    Calling the instance runs every wrapped call in order and returns the
    list of their results.
    """

    def __init__(self, iterator_slice):
        # Materialize the slice so the batch can be pickled and its size
        # reported even after the source iterator is exhausted.
        self.items = list(iterator_slice)
        self._size = len(self.items)

    def __call__(self):
        results = []
        for func, args, kwargs in self.items:
            results.append(func(*args, **kwargs))
        return results

    def __len__(self):
        # Number of atomic tasks in this batch.
        return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
    """Return the number of CPUs.

    Falls back to 1 when multiprocessing has been disabled (mp is None).
    """
    return 1 if mp is None else mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
    """ An exception that is not KeyboardInterrupt to allow subprocesses
        to be interrupted.

        Raised in place of KeyboardInterrupt inside workers because
        multiprocessing does not propagate KeyboardInterrupt itself.
    """
    pass
###############################################################################
class SafeFunction(object):
    """Callable wrapper that captures exceptions with a full formatted
    traceback in their representation.

    Useful for parallel computing with multiprocessing, for which raw
    exceptions cannot be captured across process boundaries.
    """

    def __init__(self, func):
        # The callable to guard.
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except KeyboardInterrupt:
            # multiprocessing does not interrupt workers on
            # KeyboardInterrupt, so re-raise it under a distinct type
            # that the parent process can recognize and handle.
            raise WorkerInterrupt()
        except:
            exc_type, exc_value, exc_tb = sys.exc_info()
            formatted = format_exc(exc_type, exc_value, exc_tb, context=10,
                                   tb_offset=1)
            if issubclass(exc_type, TransportableException):
                # Already wrapped: propagate untouched.
                raise
            raise TransportableException(formatted, exc_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
    """Eager, sequential computation of a batch of tasks.

    Mimics the async result API (``get``) without delaying anything, which
    is what joblib.Parallel needs when running in sequential mode.
    """

    def __init__(self, batch):
        # Run the batch right away instead of storing the callable, so the
        # input arguments are not kept alive in memory.
        self.results = batch()

    def get(self):
        """Return the already-computed results, mirroring AsyncResult.get()."""
        return self.results
###############################################################################
class BatchCompletionCallBack(object):
    """Callback run in the parent process when a batch of tasks completes.

    Used by joblib.Parallel's multiprocessing backend for progress
    reporting, for maintaining a streaming estimate of the batch
    processing duration, and for scheduling the next batch of tasks.
    """

    def __init__(self, dispatch_timestamp, batch_size, parallel):
        self.dispatch_timestamp = dispatch_timestamp
        self.batch_size = batch_size
        self.parallel = parallel

    def __call__(self, out):
        parallel = self.parallel
        parallel.n_completed_tasks += self.batch_size
        elapsed = time.time() - self.dispatch_timestamp
        if (parallel.batch_size == 'auto'
                and self.batch_size == parallel._effective_batch_size):
            # Exponentially-weighted running estimate of the dispatch-to-
            # completion duration for the current effective batch size,
            # seeded with the first measurement after a reset.
            previous = parallel._smoothed_batch_duration
            if previous == 0:
                parallel._smoothed_batch_duration = elapsed
            else:
                parallel._smoothed_batch_duration = (
                    0.8 * previous + 0.2 * elapsed)
        parallel.print_progress()
        if parallel._original_iterator is not None:
            parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is reasonable
default and the multiprocessing workers shoud never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscilations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs a left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a messages
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
    """Collect the results of all dispatched jobs into ``self._output``.

    Blocks until every job has completed; while async callbacks are still
    dispatching new work, polls the (possibly growing) job queue.
    """
    self._output = list()
    while self._iterating or len(self._jobs) > 0:
        if len(self._jobs) == 0:
            # Wait for an async callback to dispatch new jobs
            time.sleep(0.01)
            continue
        # We need to be careful: the job queue can be filling up as
        # we empty it
        if hasattr(self, '_lock'):
            self._lock.acquire()
        job = self._jobs.pop(0)
        if hasattr(self, '_lock'):
            self._lock.release()
        try:
            self._output.extend(job.get())
        except tuple(self.exceptions) as exception:
            try:
                self._aborting = True
                self._lock.acquire()
                if isinstance(exception,
                              (KeyboardInterrupt, WorkerInterrupt)):
                    # We have captured a user interruption, clean up
                    # everything
                    if hasattr(self, '_pool'):
                        self._pool.close()
                        self._pool.terminate()
                        # We can now allow subprocesses again
                        os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
                    raise exception
                elif isinstance(exception, TransportableException):
                    # Capture exception to add information on the local
                    # stack in addition to the distant stack
                    this_report = format_outer_frames(context=10,
                                                      stack_start=1)
                    report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
                        this_report,
                        exception.message,
                    )
                    # Convert this to a JoblibException
                    exception_type = _mk_exception(exception.etype)[0]
                    raise exception_type(report)
                raise exception
            finally:
                self._lock.release()
def __call__(self, iterable):
    """Dispatch the delayed tasks in ``iterable`` and return their results.

    Sets up the worker pool according to ``self.backend`` and
    ``self.n_jobs``, dispatches batches (optionally pre-dispatching only a
    slice), retrieves all outputs, then tears the pool down.
    """
    if self._jobs:
        raise ValueError('This Parallel instance is already running')
    n_jobs = self.n_jobs
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    if n_jobs < 0 and mp is not None:
        # Negative n_jobs means "all CPUs but (-n_jobs - 1)".
        n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)

    # The list of exceptions that we will capture
    self.exceptions = [TransportableException]
    self._lock = threading.Lock()

    # Whether or not to set an environment flag to track
    # multiple process spawning
    set_environ_flag = False
    if (n_jobs is None or mp is None or n_jobs == 1):
        # Sequential fallback: no pool at all.
        n_jobs = 1
        self._pool = None
    elif self.backend == 'threading':
        self._pool = ThreadPool(n_jobs)
    elif self.backend == 'multiprocessing':
        if mp.current_process().daemon:
            # Daemonic processes cannot have children
            n_jobs = 1
            self._pool = None
            warnings.warn(
                'Multiprocessing-backed parallel loops cannot be nested,'
                ' setting n_jobs=1',
                stacklevel=2)
        elif threading.current_thread().name != 'MainThread':
            # Prevent posix fork inside in non-main posix threads
            n_jobs = 1
            self._pool = None
            warnings.warn(
                'Multiprocessing backed parallel loops cannot be nested'
                ' below threads, setting n_jobs=1',
                stacklevel=2)
        else:
            already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
            if already_forked:
                raise ImportError('[joblib] Attempting to do parallel computing '
                                  'without protecting your import on a system that does '
                                  'not support forking. To use parallel-computing in a '
                                  'script, you must protect your main loop using "if '
                                  "__name__ == '__main__'"
                                  '". Please see the joblib documentation on Parallel '
                                  'for more information'
                                  )
            # Make sure to free as much memory as possible before forking
            gc.collect()
            # Set an environment variable to avoid infinite loops
            set_environ_flag = True
            poolargs = dict(
                max_nbytes=self._max_nbytes,
                mmap_mode=self._mmap_mode,
                temp_folder=self._temp_folder,
                verbose=max(0, self.verbose - 50),
                context_id=0,  # the pool is used only for one call
            )
            if self._mp_context is not None:
                # Use Python 3.4+ multiprocessing context isolation
                poolargs['context'] = self._mp_context
            self._pool = MemmapingPool(n_jobs, **poolargs)
            # We are using multiprocessing, we also want to capture
            # KeyboardInterrupts
            self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
    else:
        raise ValueError("Unsupported backend: %s" % self.backend)

    if self.batch_size == 'auto':
        self._effective_batch_size = 1

    iterator = iter(iterable)
    pre_dispatch = self.pre_dispatch
    if pre_dispatch == 'all' or n_jobs == 1:
        # prevent further dispatch via multiprocessing callback thread
        self._original_iterator = None
        self._pre_dispatch_amount = 0
    else:
        self._original_iterator = iterator
        if hasattr(pre_dispatch, 'endswith'):
            # pre_dispatch may be an expression such as '2*n_jobs'.
            pre_dispatch = eval(pre_dispatch)
        self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)

        # The main thread will consume the first pre_dispatch items and
        # the remaining items will later be lazily dispatched by async
        # callbacks upon task completions.
        iterator = itertools.islice(iterator, pre_dispatch)

    self._start_time = time.time()
    self.n_dispatched_batches = 0
    self.n_dispatched_tasks = 0
    self.n_completed_tasks = 0
    self._smoothed_batch_duration = 0.0
    try:
        if set_environ_flag:
            # Set an environment variable to avoid infinite loops
            os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
        self._iterating = True

        while self.dispatch_one_batch(iterator):
            pass

        if pre_dispatch == "all" or n_jobs == 1:
            # The iterable was consumed all at once by the above for loop.
            # No need to wait for async callbacks to trigger to
            # consumption.
            self._iterating = False
        self.retrieve()
        # Make sure that we get a last message telling us we are done
        elapsed_time = time.time() - self._start_time
        self._print('Done %3i out of %3i | elapsed: %s finished',
                    (len(self._output),
                     len(self._output),
                     short_format_time(elapsed_time)
                     ))
    finally:
        if n_jobs > 1:
            self._pool.close()
            self._pool.terminate()  # terminate does a join()
            if self.backend == 'multiprocessing':
                os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
        self._jobs = list()
    output = self._output
    self._output = None
    return output
def __repr__(self):
    """Concise representation showing the configured worker count."""
    cls_name = self.__class__.__name__
    return '%s(n_jobs=%s)' % (cls_name, self.n_jobs)
| bsd-3-clause |
sureshthalamati/spark | python/pyspark/sql/session.py | 2 | 34711 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    """Attach a ``toDF`` convenience method to :class:`RDD`.

    The given ``sparkSession`` is captured in a closure so that
    ``rdd.toDF(...)`` delegates to ``sparkSession.createDataFrame``.
    """
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
    """Builder for :class:`SparkSession`.
    """

    _lock = RLock()
    # NOTE(review): this dict lives on the class, so options set through one
    # builder instance are visible to every builder in the process — confirm
    # that this sharing is intended.
    _options = {}

    @since(2.0)
    def config(self, key=None, value=None, conf=None):
        """Sets a config option. Options set using this method are automatically propagated to
        both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

        For an existing SparkConf, use `conf` parameter.

        >>> from pyspark.conf import SparkConf
        >>> SparkSession.builder.config(conf=SparkConf())
        <pyspark.sql.session...

        For a (key, value) pair, you can omit parameter names.

        >>> SparkSession.builder.config("spark.some.config.option", "some-value")
        <pyspark.sql.session...

        :param key: a key name string for configuration property
        :param value: a value for configuration property
        :param conf: an instance of :class:`SparkConf`
        """
        with self._lock:
            if conf is None:
                # Single (key, value) pair; values are stored as strings.
                self._options[key] = str(value)
            else:
                # Copy every entry from an existing SparkConf.
                for (k, v) in conf.getAll():
                    self._options[k] = v
            return self

    @since(2.0)
    def master(self, master):
        """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
        to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
        cluster.

        :param master: a url for spark master
        """
        return self.config("spark.master", master)

    @since(2.0)
    def appName(self, name):
        """Sets a name for the application, which will be shown in the Spark web UI.

        If no application name is set, a randomly generated name will be used.

        :param name: an application name
        """
        return self.config("spark.app.name", name)

    @since(2.0)
    def enableHiveSupport(self):
        """Enables Hive support, including connectivity to a persistent Hive metastore, support
        for Hive serdes, and Hive user-defined functions.
        """
        return self.config("spark.sql.catalogImplementation", "hive")

    @since(2.0)
    def getOrCreate(self):
        """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
        new one based on the options set in this builder.

        This method first checks whether there is a valid global default SparkSession, and if
        yes, return that one. If no valid global default SparkSession exists, the method
        creates a new SparkSession and assigns the newly created SparkSession as the global
        default.

        >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
        >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
        True

        In case an existing SparkSession is returned, the config options specified
        in this builder will be applied to the existing SparkSession.

        >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
        >>> s1.conf.get("k1") == s2.conf.get("k1")
        True
        >>> s1.conf.get("k2") == s2.conf.get("k2")
        True
        """
        with self._lock:
            from pyspark.context import SparkContext
            from pyspark.conf import SparkConf
            session = SparkSession._instantiatedSession
            # A session is stale when it was never created or its
            # SparkContext has been stopped (its Java handle is gone).
            if session is None or session._sc._jsc is None:
                sparkConf = SparkConf()
                for key, value in self._options.items():
                    sparkConf.set(key, value)
                sc = SparkContext.getOrCreate(sparkConf)
                # This SparkContext may be an existing one.
                for key, value in self._options.items():
                    # we need to propagate the confs
                    # before we create the SparkSession. Otherwise, confs like
                    # warehouse path and metastore url will not be set correctly (
                    # these confs cannot be changed once the SparkSession is created).
                    sc._conf.set(key, value)
                session = SparkSession(sc)
            # Apply builder options to the (possibly pre-existing) session.
            for key, value in self._options.items():
                session._jsparkSession.sessionState().conf().setConfString(key, value)
            for key, value in self._options.items():
                session.sparkContext._conf.set(key, value)
            return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""

# The most recently created live SparkSession; reused by
# Builder.getOrCreate and cleared by stop().
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
    """Creates a new SparkSession.

    If ``jsparkSession`` is not supplied, the JVM's default session is
    reused when one exists and its context is still alive; otherwise a
    fresh JVM session is created over ``sparkContext``.

    >>> from datetime import datetime
    >>> spark = SparkSession(sc)
    >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
    ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
    ...     time=datetime(2014, 8, 1, 14, 1, 5))])
    >>> df = allTypes.toDF()
    >>> df.createOrReplaceTempView("allTypes")
    >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
    ...     'from allTypes where b and i > 0').collect()
    [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
        dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
    >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
    [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
    """
    from pyspark.sql.context import SQLContext
    self._sc = sparkContext
    self._jsc = self._sc._jsc
    self._jvm = self._sc._jvm
    if jsparkSession is None:
        # Reuse the JVM's default session only if it is still usable.
        if self._jvm.SparkSession.getDefaultSession().isDefined() \
                and not self._jvm.SparkSession.getDefaultSession().get() \
                    .sparkContext().isStopped():
            jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
        else:
            jsparkSession = self._jvm.SparkSession(self._jsc.sc())
    self._jsparkSession = jsparkSession
    self._jwrapped = self._jsparkSession.sqlContext()
    self._wrapped = SQLContext(self._sc, self, self._jwrapped)
    _monkey_patch_RDD(self)
    install_exception_handler()
    # If we had an instantiated SparkSession attached with a SparkContext
    # which is stopped now, we need to renew the instantiated SparkSession.
    # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
    if SparkSession._instantiatedSession is None \
            or SparkSession._instantiatedSession._sc._jsc is None:
        SparkSession._instantiatedSession = self
        self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
def _repr_html_(self):
    # Rich HTML representation for notebook front-ends (IPython/Jupyter):
    # shows the catalog implementation plus the SparkContext's own HTML.
    return """
        <div>
            <p><b>SparkSession - {catalogImplementation}</b></p>
            {sc_HTML}
        </div>
    """.format(
        catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
        sc_HTML=self.sparkContext._repr_html_()
    )
@since(2.0)
def newSession(self):
    """
    Creates and returns a new :class:`SparkSession` that shares this
    session's :class:`SparkContext` and table cache but has its own
    SQLConf and its own registered temporary views and UDFs.
    """
    fresh_jsession = self._jsparkSession.newSession()
    return self.__class__(self._sc, fresh_jsession)
@property
@since(2.0)
def sparkContext(self):
    """The underlying :class:`SparkContext` this session is bound to."""
    return self._sc
@property
@since(2.0)
def version(self):
    """Version of Spark this application runs on, as reported by the JVM."""
    return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
    """Runtime configuration interface for Spark.

    Lets the user get and set every Spark and Hadoop configuration
    relevant to Spark SQL. When a config has no SQL-level value, reads
    fall back to the value set on the underlying :class:`SparkContext`,
    if any.
    """
    try:
        # Reuse the wrapper created on a previous access.
        return self._conf
    except AttributeError:
        self._conf = RuntimeConfig(self._jsparkSession.conf())
        return self._conf
@property
@since(2.0)
def catalog(self):
    """Interface for creating, dropping, altering, or querying the
    underlying databases, tables, functions, etc.

    :return: :class:`Catalog`
    """
    from pyspark.sql.catalog import Catalog
    try:
        # Reuse the wrapper created on a previous access.
        return self._catalog
    except AttributeError:
        self._catalog = Catalog(self)
        return self._catalog
@property
@since(2.0)
def udf(self):
    """A :class:`UDFRegistration` for registering user-defined functions.

    :return: :class:`UDFRegistration`
    """
    from pyspark.sql.udf import UDFRegistration
    return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
    """Create a :class:`DataFrame` with a single
    :class:`pyspark.sql.types.LongType` column named ``id``, holding the
    values from ``start`` up to (but excluding) ``end``, advancing by
    ``step``.

    :param start: the start value
    :param end: the end value (exclusive)
    :param step: the incremental step (default: 1)
    :param numPartitions: the number of partitions of the DataFrame
    :return: :class:`DataFrame`

    >>> spark.range(1, 7, 2).collect()
    [Row(id=1), Row(id=3), Row(id=5)]

    If only one argument is specified, it will be used as the end value.

    >>> spark.range(3).collect()
    [Row(id=0), Row(id=1), Row(id=2)]
    """
    if numPartitions is None:
        numPartitions = self._sc.defaultParallelism

    # Mirror the builtin range(): a single argument is the exclusive end.
    lo, hi = (0, start) if end is None else (start, end)
    jdf = self._jsparkSession.range(int(lo), int(hi), int(step),
                                    int(numPartitions))
    return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
    """
    Infer a schema from a list of Row or tuple.

    :param data: list of Row or tuple
    :param names: list of column names
    :return: :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    if type(data[0]) is dict:
        warnings.warn("inferring schema from dict is deprecated,"
                      "please use pyspark.sql.Row instead")
    # Merge per-row schemas pairwise into a single StructType.
    row_schemas = (_infer_schema(row, names) for row in data)
    schema = reduce(_merge_type, row_schemas)
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
    """
    Infer schema from an RDD of Row or tuple.

    :param rdd: an RDD of Row or tuple
    :param samplingRatio: sampling ratio, or no sampling (default)
    :param names: list of column names to use for the inferred fields
    :return: :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, "
                         "can not infer schema")
    if type(first) is dict:
        warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                      "Use pyspark.sql.Row instead")

    if samplingRatio is None:
        # No sampling: start from the first row, and only if some column
        # type is still unknown, merge in up to 99 more rows.
        schema = _infer_schema(first, names=names)
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(schema, _infer_schema(row, names=names))
                if not _has_nulltype(schema):
                    break
            else:
                # for/else: ran out of rows with types still undetermined.
                raise ValueError("Some of types cannot be determined by the "
                                 "first 100 rows, please try again with sampling")
    else:
        # Sample the RDD (ratios >= 0.99 just scan everything) and merge
        # the schema inferred from every sampled row.
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
    return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
    """
    Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.

    :param rdd: an RDD of Row/tuple (or arbitrary SQL values)
    :param schema: ``None``, a list of column names, or a :class:`StructType`
    :param samplingRatio: forwarded to schema inference when needed
    :return: tuple of (RDD of internal rows, :class:`StructType`)
    """
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchema(rdd, samplingRatio, names=schema)
        converter = _create_converter(struct)
        rdd = rdd.map(converter)
        if isinstance(schema, (list, tuple)):
            # Rename the inferred fields with the user-supplied names.
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct

    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

    # convert python objects to sql data
    rdd = rdd.map(schema.toInternal)
    return rdd, schema
def _createFromLocal(self, data, schema):
    """
    Create an RDD for DataFrame from a list or pandas.DataFrame, returns
    the RDD and schema.

    :param data: a list (or other iterable) of rows
    :param schema: ``None``, a list of column names, or a :class:`StructType`
    :return: tuple of (RDD of internal rows, :class:`StructType`)
    """
    # make sure data could consumed multiple times
    if not isinstance(data, list):
        data = list(data)

    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchemaFromList(data, names=schema)
        converter = _create_converter(struct)
        data = map(converter, data)
        if isinstance(schema, (list, tuple)):
            # Rename the inferred fields with the user-supplied names.
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct

    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

    # convert python objects to sql data
    data = [schema.toInternal(row) for row in data]
    return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
    """
    Used when converting a pandas.DataFrame to Spark using to_records(): build
    a corrected dtype for a numpy record so its fields load properly into Spark.

    :param rec: a numpy record to check field dtypes
    :return corrected dtype for a numpy.record or None if no correction needed
    """
    import numpy as np

    dtypes = rec.dtype
    field_names = dtypes.names
    fixed_fields = []
    needs_fix = False
    for idx in range(len(dtypes)):
        field_type = dtypes[idx]
        # If type is a datetime64 timestamp, convert to microseconds
        # NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
        # conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
        if field_type == np.dtype('datetime64[ns]'):
            field_type = 'datetime64[us]'
            needs_fix = True
        fixed_fields.append((str(field_names[idx]), field_type))
    return np.dtype(fixed_fields) if needs_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
    """
    Convert a pandas.DataFrame to list of records that can be used to make a DataFrame

    :param pdf: the pandas.DataFrame to convert
    :param schema: a :class:`StructType` or other schema object (may be None)
    :param timezone: session time zone string, or None to skip tz localization
    :return list of records
    """
    if timezone is not None:
        from pyspark.sql.types import _check_series_convert_timestamps_tz_local
        copied = False
        if isinstance(schema, StructType):
            # Only localize the columns the schema declares as timestamps.
            for field in schema:
                # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                if isinstance(field.dataType, TimestampType):
                    s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                    if s is not pdf[field.name]:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[field.name] = s
        else:
            # No struct schema: try to localize every column.
            for column, series in pdf.iteritems():
                s = _check_series_convert_timestamps_tz_local(series, timezone)
                if s is not series:
                    if not copied:
                        # Copy once if the series is modified to prevent the original
                        # Pandas DataFrame from being updated
                        pdf = pdf.copy()
                        copied = True
                    pdf[column] = s

    # Convert pandas.DataFrame to list of numpy records
    np_records = pdf.to_records(index=False)

    # Check if any columns need to be fixed for Spark to infer properly
    if len(np_records) > 0:
        record_dtype = self._get_numpy_record_dtype(np_records[0])
        if record_dtype is not None:
            return [r.astype(record_dtype).tolist() for r in np_records]

    # Convert list of numpy records to python lists
    return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowSerializer, _create_batch
    from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create Arrow record batches
    batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
                             timezone)
               for pdf_slice in pdf_slices]

    # Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
    if isinstance(schema, (list, tuple)):
        struct = from_arrow_schema(batches[0].schema)
        # Rename the inferred fields with the user-supplied column names.
        for i, name in enumerate(schema):
            struct.fields[i].name = name
            struct.names[i] = name
        schema = struct

    # Create the Spark DataFrame directly from the Arrow data and schema
    jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
    jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
        jrdd, schema.json(), self._wrapped._jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
    """
    Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

    When ``schema`` is a list of column names, the type of each column
    will be inferred from ``data``.

    When ``schema`` is ``None``, it will try to infer the schema (column names and types)
    from ``data``, which should be an RDD of :class:`Row`,
    or :class:`namedtuple`, or :class:`dict`.

    When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
    the real data, or an exception will be thrown at runtime. If the given schema is not
    :class:`pyspark.sql.types.StructType`, it will be wrapped into a
    :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
    each record will also be wrapped into a tuple, which can be converted to row later.

    If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
    rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

    :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
        etc.), or :class:`list`, or :class:`pandas.DataFrame`.
    :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
        column names, default is ``None``. The data type string format equals to
        :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
        omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
        ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
        ``int`` as a short name for ``IntegerType``.
    :param samplingRatio: the sample ratio of rows used for inferring
    :param verifySchema: verify data types of every row against schema.
    :return: :class:`DataFrame`

    .. versionchanged:: 2.1
       Added verifySchema.

    >>> l = [('Alice', 1)]
    >>> spark.createDataFrame(l).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> spark.createDataFrame(l, ['name', 'age']).collect()
    [Row(name=u'Alice', age=1)]

    >>> d = [{'name': 'Alice', 'age': 1}]
    >>> spark.createDataFrame(d).collect()
    [Row(age=1, name=u'Alice')]

    >>> rdd = sc.parallelize(l)
    >>> spark.createDataFrame(rdd).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> df = spark.createDataFrame(rdd, ['name', 'age'])
    >>> df.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql import Row
    >>> Person = Row('name', 'age')
    >>> person = rdd.map(lambda r: Person(*r))
    >>> df2 = spark.createDataFrame(person)
    >>> df2.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql.types import *
    >>> schema = StructType([
    ...    StructField("name", StringType(), True),
    ...    StructField("age", IntegerType(), True)])
    >>> df3 = spark.createDataFrame(rdd, schema)
    >>> df3.collect()
    [Row(name=u'Alice', age=1)]

    >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
    [Row(name=u'Alice', age=1)]
    >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
    [Row(0=1, 1=2)]

    >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
    [Row(a=u'Alice', b=1)]
    >>> rdd = rdd.map(lambda row: row[1])
    >>> spark.createDataFrame(rdd, "int").collect()
    [Row(value=1)]
    >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Py4JJavaError: ...
    """
    if isinstance(data, DataFrame):
        raise TypeError("data is already a DataFrame")

    if isinstance(schema, basestring):
        schema = _parse_datatype_string(schema)
    elif isinstance(schema, (list, tuple)):
        # Must re-encode any unicode strings to be consistent with StructField names
        schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]

    try:
        import pandas
        has_pandas = True
    except Exception:
        has_pandas = False
    if has_pandas and isinstance(data, pandas.DataFrame):
        from pyspark.sql.utils import require_minimum_pandas_version
        require_minimum_pandas_version()

        if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
                == "true":
            timezone = self.conf.get("spark.sql.session.timeZone")
        else:
            timezone = None

        # If no schema supplied by user then get the names of columns only
        if schema is None:
            schema = [str(x) if not isinstance(x, basestring) else
                      (x.encode('utf-8') if not isinstance(x, str) else x)
                      for x in data.columns]

        # Try the Arrow fast path when enabled; on failure either fall
        # back to the plain conversion (with a warning) or re-raise.
        if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
                and len(data) > 0:
            try:
                return self._create_from_pandas_with_arrow(data, schema, timezone)
            except Exception as e:
                from pyspark.util import _exception_message
                if self.conf.get("spark.sql.execution.arrow.fallback.enabled", "true") \
                        .lower() == "true":
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.enabled' is set to true; however, "
                        "failed by the reason below:\n %s\n"
                        "Attempts non-optimization as "
                        "'spark.sql.execution.arrow.fallback.enabled' is set to "
                        "true." % _exception_message(e))
                    warnings.warn(msg)
                else:
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.enabled' is set to true; however, "
                        "failed by the reason below:\n %s\n"
                        "For fallback to non-optimization automatically, please set true to "
                        "'spark.sql.execution.arrow.fallback.enabled'." % _exception_message(e))
                    raise RuntimeError(msg)
        data = self._convert_from_pandas(data, schema, timezone)

    if isinstance(schema, StructType):
        verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            return obj

    elif isinstance(schema, DataType):
        dataType = schema
        schema = StructType().add("value", schema)

        verify_func = _make_type_verifier(
            dataType, name="field value") if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            # Wrap the bare value in a 1-tuple so it maps onto the single
            # "value" field of the wrapper struct.
            return obj,
    else:
        prepare = lambda obj: obj

    if isinstance(data, RDD):
        rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
    else:
        rdd, schema = self._createFromLocal(map(prepare, data), schema)
    jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
    jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
    """Run the given SQL query and return its result as a :class:`DataFrame`.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
    >>> df2.collect()
    [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
    """
    jdf = self._jsparkSession.sql(sqlQuery)
    return DataFrame(jdf, self._wrapped)
@since(2.0)
def table(self, tableName):
    """Return the table ``tableName`` as a :class:`DataFrame`.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.table("table1")
    >>> sorted(df.collect()) == sorted(df2.collect())
    True
    """
    jdf = self._jsparkSession.table(tableName)
    return DataFrame(jdf, self._wrapped)
@property
@since(2.0)
def read(self):
    """
    A :class:`DataFrameReader` for loading external data in as a
    :class:`DataFrame`.

    :return: :class:`DataFrameReader`
    """
    return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
    """
    A :class:`DataStreamReader` for reading data streams in as a
    streaming :class:`DataFrame`.

    .. note:: Evolving.

    :return: :class:`DataStreamReader`
    """
    return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
    """A :class:`StreamingQueryManager` for managing all the
    :class:`StreamingQuery` instances active on `this` context.

    .. note:: Evolving.

    :return: :class:`StreamingQueryManager`
    """
    from pyspark.sql.streaming import StreamingQueryManager
    return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
    """Stop the underlying :class:`SparkContext` and clear session state."""
    self._sc.stop()
    # We should clean the default session up. See SPARK-23228.
    jvm_session_cls = self._jvm.SparkSession
    jvm_session_cls.clearDefaultSession()
    SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Returns the session itself; all teardown is handled in __exit__.
        """
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        Exceptions are not suppressed (implicitly returns None/falsy).
        """
        self.stop()
def _test():
    """Run this module's doctests against a local SparkContext fixture."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session

    # The doctests assume they execute from the Spark installation root.
    os.chdir(os.environ["SPARK_HOME"])

    test_globs = pyspark.sql.session.__dict__.copy()
    context = SparkContext('local[4]', 'PythonTest')
    test_globs['sc'] = context
    test_globs['spark'] = SparkSession(context)
    rows = [Row(field1=1, field2="row1"),
            Row(field1=2, field2="row2"),
            Row(field1=3, field2="row3")]
    test_globs['rdd'] = rdd = context.parallelize(rows)
    test_globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=test_globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    test_globs['sc'].stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| apache-2.0 |
intel-analytics/analytics-zoo | pyzoo/zoo/chronos/model/tcmf/DeepGLO.py | 1 | 32279 | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from the DeepGlo Project. https://github.com/rajatsen91/deepglo
#
# Note: This license has also been called the "New BSD License" or "Modified BSD License". See also
# the 2-clause BSD License.
#
# Copyright (c) 2019 The DeepGLO Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from zoo.chronos.model.tcmf.data_loader import TCMFDataLoader
from zoo.chronos.model.tcmf.local_model import TemporalConvNet, LocalModel
from zoo.chronos.model.tcmf.time import TimeCovariates
import copy
import pickle
import logging
# Module-level logger; a StreamHandler is attached so INFO-level training
# progress is echoed to the console even without host app logging config.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logger.addHandler(console)
def get_model(A, y, lamb=0):
    """Ridge-regularized least squares via the normal equations.

    Solves ``(A^T A + lamb * I) w = A^T y`` and returns the full
    ``np.linalg.lstsq`` result tuple (solution, residuals, rank,
    singular values).
    """
    n_col = A.shape[1]
    gram = A.T.dot(A) + lamb * np.identity(n_col)
    rhs = A.T.dot(y)
    return np.linalg.lstsq(gram, rhs, rcond=None)
class DeepGLO(object):
    """Global matrix-factorization forecaster (TCMF / DeepGLO).

    The series matrix ``Ymat`` (n x T) is factorized as ``F @ X`` where
    ``F`` (n x rank) holds per-series embeddings and ``X`` (rank x T) holds
    global temporal basis series.  A temporal convolution network ``Xseq``
    regularizes and extrapolates ``X``; a second network ``Yseq`` (built in
    ``train_Yseq``) produces the final per-series forecasts using the global
    reconstruction as extra covariates.
    """

    def __init__(
        self,
        vbsize=150,
        hbsize=256,
        num_channels_X=None,
        num_channels_Y=None,
        kernel_size=7,
        dropout=0.2,
        rank=64,
        kernel_size_Y=7,
        lr=0.0005,
        normalize=False,
        use_time=True,
        svd=False,
        forward_cov=False,
    ):
        """Build the model shell; training state is created in train_all_models.

        :param vbsize: vertical (series-dimension) batch size.
        :param hbsize: horizontal (time-dimension) batch size.
        :param num_channels_X: TCN channel sizes for the X (basis) network;
            defaults to [32, 32, 32, 32, 1].
        :param num_channels_Y: TCN channel sizes for the Y network; defaults
            to [32, 32, 32, 32, 1].
        :param kernel_size: TCN kernel size for the X network.
        :param dropout: dropout rate for both TCNs.
        :param rank: factorization rank.
        :param kernel_size_Y: TCN kernel size for the Y network.
        :param lr: learning rate used by every Adam optimizer in this class.
        :param normalize: if True, per-series standardize Ymat before training.
        :param use_time: if True, generate datetime covariates for Yseq.
        :param svd: if True, initialize the factors from a least-squares fit
            of sampled rows instead of random normal noise.
        :param forward_cov: if True, feed one-step-ahead global predictions
            as covariates instead of aligned ones.
        """
        # Fix: the channel lists were mutable default arguments (shared
        # across instances); use None sentinels instead.
        if num_channels_X is None:
            num_channels_X = [32, 32, 32, 32, 1]
        if num_channels_Y is None:
            num_channels_Y = [32, 32, 32, 32, 1]
        self.use_time = use_time
        self.dropout = dropout
        self.forward_cov = forward_cov
        self.Xseq = TemporalConvNet(
            num_inputs=1,
            num_channels=num_channels_X,
            kernel_size=kernel_size,
            dropout=dropout,
            init=True,
        )
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.num_channels_X = num_channels_X
        self.num_channels_Y = num_channels_Y
        self.kernel_size_Y = kernel_size_Y
        self.rank = rank
        self.kernel_size = kernel_size
        self.lr = lr
        self.normalize = normalize
        self.svd = svd

    def tensor2d_to_temporal(self, T):
        """Reshape a 2-D tensor (n, t) into TCN input layout (n, 1, t)."""
        T = T.view(1, T.size(0), T.size(1))
        T = T.transpose(0, 1)
        return T

    def temporal_to_tensor2d(self, T):
        """Inverse of tensor2d_to_temporal: (n, 1, t) -> (n, t)."""
        T = T.view(T.size(0), T.size(2))
        return T

    def calculate_newX_loss_vanilla(self, Xn, Fn, Yn, Xf, alpha):
        """Blend of reconstruction loss (Fn @ Xn vs Yn) and prior loss (Xn vs Xf).

        Both terms are MSEs normalized by the mean square of their target;
        ``alpha`` weights the prior (TCN-extrapolation) term.
        """
        # Fix: removed unused local `cr1 = nn.L1Loss()`.
        Yout = torch.mm(Fn, Xn)
        cr2 = nn.MSELoss()
        l1 = cr2(Yout, Yn) / torch.mean(Yn ** 2)
        l2 = cr2(Xn, Xf) / torch.mean(Xf ** 2)
        return (1 - alpha) * l1 + alpha * l2

    def recover_future_X(
        self,
        last_step,
        future,
        num_epochs=50,
        alpha=0.5,
        vanilla=True,
        tol=1e-7,
    ):
        """Solve for the next ``future`` columns of X by gradient descent.

        Starts from random noise and minimizes calculate_newX_loss_vanilla
        against (a) the observed Ymat window and (b) the Xseq extrapolation.
        Stops early once the loss change falls below ``tol``.

        :param vanilla: currently unused — kept for interface compatibility.
        :return: detached (rank, future) tensor of recovered X columns.
        """
        # Receptive field of the larger of the two TCNs.
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        X = self.X[:, last_step - rg: last_step]
        X = self.tensor2d_to_temporal(X)
        outX = self.predict_future(model=self.Xseq, inp=X, future=future)
        outX = self.temporal_to_tensor2d(outX)
        Xf = outX[:, -future::]
        Yn = self.Ymat[:, last_step: last_step + future]
        Yn = torch.from_numpy(Yn).float()
        Fn = self.F
        Xt = torch.zeros(self.rank, future).float()
        Xn = torch.normal(Xt, 0.1)
        lprev = 0
        for i in range(num_epochs):
            # NOTE(review): the Adam optimizer is re-created every iteration,
            # so its moment estimates never accumulate — effectively plain
            # per-step Adam updates.  Preserved as-is.
            Xn = Variable(Xn, requires_grad=True)
            optim_Xn = optim.Adam(params=[Xn], lr=self.lr)
            optim_Xn.zero_grad()
            loss = self.calculate_newX_loss_vanilla(
                Xn, Fn.detach(), Yn.detach(), Xf.detach(), alpha
            )
            loss.backward()
            optim_Xn.step()
            # Xn = torch.clamp(Xn.detach(), min=0)
            if np.abs(lprev - loss.item()) <= tol:
                break
            if i % 1000 == 0:
                print(f"Recovery Loss of epoch {i} is: " + str(loss.item()))
            lprev = loss.item()
        return Xn.detach()

    def step_factX_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
        """One Adam step on the slice of X covered by the current batch."""
        Xout = self.X[:, last_hindex + 1: last_hindex + 1 + out.size(2)]
        Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
        Xout = Variable(Xout, requires_grad=True)
        out = self.temporal_to_tensor2d(out)
        optim_X = optim.Adam(params=[Xout], lr=self.lr)
        Hout = torch.matmul(Fout, Xout)
        optim_X.zero_grad()
        loss = torch.mean(torch.pow(Hout - out.detach(), 2))
        l2 = torch.mean(torch.pow(Xout, 2))
        # Scale the ridge term so `reg` is relative to the data-fit loss.
        r = loss.detach() / l2.detach()
        loss = loss + r * reg * l2
        loss.backward()
        optim_X.step()
        # Xout = torch.clamp(Xout, min=0)
        self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)] = Xout.detach()
        return loss

    def step_factF_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
        """One Adam step on the slice of F covered by the current batch."""
        Xout = self.X[:, last_hindex + 1: last_hindex + 1 + out.size(2)]
        Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
        Fout = Variable(Fout, requires_grad=True)
        optim_F = optim.Adam(params=[Fout], lr=self.lr)
        out = self.temporal_to_tensor2d(out)
        Hout = torch.matmul(Fout, Xout)
        optim_F.zero_grad()
        loss = torch.mean(torch.pow(Hout - out.detach(), 2))
        l2 = torch.mean(torch.pow(Fout, 2))
        # Relative ridge regularization, as in step_factX_loss.
        r = loss.detach() / l2.detach()
        loss = loss + r * reg * l2
        loss.backward()
        optim_F.step()
        self.F[
            self.D.I[last_vindex: last_vindex + inp.size(0)], :
        ] = Fout.detach()
        return loss

    def step_temporal_loss_X(self, inp, last_vindex, last_hindex):
        """Pull the next X window toward the Xseq one-step-ahead prediction."""
        Xin = self.X[:, last_hindex: last_hindex + inp.size(2)]
        Xout = self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)]
        # Freeze the TCN; only the X entries are updated here.
        for p in self.Xseq.parameters():
            p.requires_grad = False
        Xin = Variable(Xin, requires_grad=True)
        Xout = Variable(Xout, requires_grad=True)
        optim_out = optim.Adam(params=[Xout], lr=self.lr)
        Xin = self.tensor2d_to_temporal(Xin)
        Xout = self.tensor2d_to_temporal(Xout)
        hatX = self.Xseq(Xin)
        optim_out.zero_grad()
        loss = torch.mean(torch.pow(Xout - hatX.detach(), 2))
        loss.backward()
        optim_out.step()
        # Xout = torch.clamp(Xout, min=0)
        temp = self.temporal_to_tensor2d(Xout.detach())
        self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)] = temp
        return loss

    def predict_future_batch(self, model, inp, future=10):
        """Autoregressively roll ``model`` forward ``future`` steps.

        Each step appends the model's last output column to the input.
        Returns a detached numpy array of shape (n, t + future).
        """
        out = model(inp)
        output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
        out = torch.cat((inp, output), dim=2)
        for i in range(future - 1):
            inp = out
            out = model(inp)
            output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
            out = torch.cat((inp, output), dim=2)
        out = self.temporal_to_tensor2d(out)
        out = np.array(out.detach())
        return out

    def predict_future(self, model, inp, future=10, bsize=90):
        """Batched wrapper around predict_future_batch over the series axis."""
        n = inp.size(0)
        ids = np.arange(0, n, bsize)
        ids = list(ids) + [n]
        out = self.predict_future_batch(model, inp[ids[0]: ids[1], :, :], future)
        for i in range(1, len(ids) - 1):
            temp = self.predict_future_batch(
                model, inp[ids[i]: ids[i + 1], :, :], future
            )
            out = np.vstack([out, temp])
        out = torch.from_numpy(out).float()
        return self.tensor2d_to_temporal(out)

    def predict_global(
        self, ind, last_step=100, future=10, normalize=False, bsize=90
    ):
        """Global-factor forecast: extrapolate X with Xseq, then F @ X.

        :param ind: row indices to return (all rows when None).
        :param normalize: if True, undo the training normalization.
        :return: numpy array (len(ind), rg + future) of reconstructed values.
        """
        if ind is None:
            ind = np.arange(self.Ymat.shape[0])
        self.Xseq = self.Xseq.eval()
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        X = self.X[:, last_step - rg: last_step]
        # Fix: removed unused locals n = X.size(0), T = X.size(1).
        X = self.tensor2d_to_temporal(X)
        outX = self.predict_future(
            model=self.Xseq, inp=X, future=future, bsize=bsize
        )
        outX = self.temporal_to_tensor2d(outX)
        F = self.F
        Y = torch.matmul(F, outX)
        Y = np.array(Y[ind, :].detach())
        del F
        # Restore trainability after eval-mode inference.
        for p in self.Xseq.parameters():
            p.requires_grad = True
        if normalize:
            Y = Y - self.mini
            Y = Y * self.s[ind, None] + self.m[ind, None]
            return Y
        else:
            return Y

    def train_Xseq(self, Ymat, num_epochs=20, val_len=24, early_stop=False, tenacity=3):
        """Fit the X-network on the current factor matrix via a LocalModel."""
        seq = self.Xseq
        num_channels = self.num_channels_X
        kernel_size = self.kernel_size
        # NOTE(review): true division can leave vbsize as a float here;
        # LocalModel presumably tolerates it — consider `//`.  Preserved.
        vbsize = min(self.vbsize, Ymat.shape[0] / 2)
        for p in seq.parameters():
            p.requires_grad = True
        TC = LocalModel(
            Ymat=Ymat,
            num_inputs=1,
            num_channels=num_channels,
            kernel_size=kernel_size,
            vbsize=vbsize,
            hbsize=self.hbsize,
            normalize=False,
            end_index=self.end_index - val_len,
            val_len=val_len,
            lr=self.lr,
        )
        TC.train_model(num_epochs=num_epochs, early_stop=early_stop, tenacity=tenacity)
        self.Xseq = TC.seq

    def train_factors(
        self,
        reg_X=0.0,
        reg_F=0.0,
        mod=5,
        val_len=24,
        early_stop=False,
        tenacity=3,
        ind=None,
        seed=False,
    ):
        """Alternate Adam steps on F, X (and temporal X) over data batches.

        Runs until ``self.num_epochs`` data-loader epochs are consumed,
        tracking a global validation error and keeping the best (X, F, Xseq)
        snapshot; restores it when early stopping fires.
        """
        self.D.epoch = 0
        self.D.vindex = 0
        self.D.hindex = 0
        for p in self.Xseq.parameters():
            p.requires_grad = True
        l_F = [0.0]
        l_X = [0.0]
        l_X_temporal = [0.0]
        iter_count = 0
        vae = float("inf")
        scount = 0
        Xbest = self.X.clone()
        Fbest = self.F.clone()
        while self.D.epoch < self.num_epochs:
            last_epoch = self.D.epoch
            last_vindex = self.D.vindex
            last_hindex = self.D.hindex
            inp, out, vindex, hindex = self.D.next_batch()
            step_l_F = self.step_factF_loss(inp, out, last_vindex, last_hindex, reg=reg_F)
            # Fix: was `l_F = l_F + [..]` — O(n) copy per step; append is O(1).
            l_F.append(step_l_F.item())
            step_l_X = self.step_factX_loss(inp, out, last_vindex, last_hindex, reg=reg_X)
            l_X.append(step_l_X.item())
            if seed is False and iter_count % mod == 1:
                l2 = self.step_temporal_loss_X(inp, last_vindex, last_hindex)
                l_X_temporal.append(l2.item())
            iter_count = iter_count + 1
            if self.D.epoch > last_epoch:
                print("Entering Epoch#{}".format(self.D.epoch))
                print("Factorization Loss F:{}".format(np.mean(l_F)))
                print("Factorization Loss X:{}".format(np.mean(l_X)))
                print("Temporal Loss X:{}".format(np.mean(l_X_temporal)))
                # Fix: removed dead `else: ind = ind` branch.
                if ind is None:
                    ind = np.arange(self.Ymat.shape[0])
                inp = self.predict_global(
                    ind,
                    last_step=self.end_index - val_len,
                    future=val_len,
                )
                R = self.Ymat[ind, self.end_index - val_len: self.end_index]
                S = inp[:, -val_len::]
                # Weighted absolute percentage error on the hold-out window.
                ve = np.abs(R - S).mean() / np.abs(R).mean()
                print("Validation Loss (Global):{}".format(ve))
                if ve <= vae:
                    vae = ve
                    scount = 0
                    Xbest = self.X.clone()
                    Fbest = self.F.clone()
                    # Deep-copy the network via pickle so the snapshot is
                    # independent of subsequent training updates.
                    Xseqbest = pickle.loads(pickle.dumps(self.Xseq))
                else:
                    scount += 1
                    if scount > tenacity and early_stop:
                        print("Early Stopped")
                        self.X = Xbest
                        self.F = Fbest
                        self.Xseq = Xseqbest
                        break

    def create_Ycov(self):
        """Build the covariate tensor for Yseq from the global reconstruction.

        Channel 0 carries the factor-model reconstruction (shifted one step
        ahead when ``forward_cov``); channel 1 (only when ``period`` is set)
        carries the series lagged by ``period - 1``.
        """
        t0 = self.end_index + 1
        self.D.epoch = 0
        self.D.vindex = 0
        self.D.hindex = 0
        Ycov = copy.deepcopy(self.Ymat[:, 0:t0])
        Ymat_now = self.Ymat[:, 0:t0]
        self.Xseq = self.Xseq.eval()
        # One pass over the data, overwriting Ycov with F @ Xseq(X).
        while self.D.epoch < 1:
            last_epoch = self.D.epoch
            last_vindex = self.D.vindex
            last_hindex = self.D.hindex
            inp, out, vindex, hindex = self.D.next_batch()
            Xin = self.tensor2d_to_temporal(self.X[:, last_hindex: last_hindex + inp.size(2)])
            Xout = self.temporal_to_tensor2d(self.Xseq(Xin))
            Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
            output = np.array(torch.matmul(Fout, Xout).detach())
            Ycov[
                last_vindex: last_vindex + output.shape[0],
                last_hindex + 1: last_hindex + 1 + output.shape[1],
            ] = output
        for p in self.Xseq.parameters():
            p.requires_grad = True
        if self.period is None:
            Ycov_wc = np.zeros(shape=[Ycov.shape[0], 1, Ycov.shape[1]])
            if self.forward_cov:
                Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
            else:
                Ycov_wc[:, 0, :] = Ycov
        else:
            Ycov_wc = np.zeros(shape=[Ycov.shape[0], 2, Ycov.shape[1]])
            if self.forward_cov:
                Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
            else:
                Ycov_wc[:, 0, :] = Ycov
            Ycov_wc[:, 1, self.period - 1::] = Ymat_now[:, 0: -(self.period - 1)]
        return Ycov_wc

    def train_Yseq(self, num_epochs=20,
                   covariates=None,
                   dti=None,
                   val_len=24,
                   num_workers=1,
                   ):
        """Train the final per-series network on Ymat with global covariates.

        :return: the LocalModel validation loss.
        """
        Ycov = self.create_Ycov()
        self.Yseq = LocalModel(
            self.Ymat,
            num_inputs=1,
            num_channels=self.num_channels_Y,
            kernel_size=self.kernel_size_Y,
            dropout=self.dropout,
            vbsize=self.vbsize,
            hbsize=self.hbsize,
            lr=self.lr,
            val_len=val_len,
            test=True,
            end_index=self.end_index - val_len,
            normalize=False,
            start_date=self.start_date,
            freq=self.freq,
            covariates=covariates,
            use_time=self.use_time,
            dti=dti,
            Ycov=Ycov,
        )
        val_loss = self.Yseq.train_model(num_epochs=num_epochs,
                                         num_workers=num_workers,
                                         early_stop=False)
        return val_loss

    def train_all_models(
        self,
        Ymat,
        val_len=24,
        start_date="2016-1-1",
        freq="H",
        covariates=None,
        dti=None,
        period=None,
        init_epochs=100,
        alt_iters=10,
        y_iters=200,
        tenacity=7,
        mod=5,
        max_FX_epoch=300,
        max_TCN_epoch=300,
        num_workers=1,
    ):
        """Full training pipeline: init factors, alternate (F,X)/Xseq, fit Yseq.

        :param Ymat: (n, T) numpy array of raw series values.
        :return: the Yseq validation loss from the final training stage.
        """
        self.end_index = Ymat.shape[1]
        self.start_date = start_date
        self.freq = freq
        self.period = period
        self.covariates = covariates
        self.dti = dti
        if self.normalize:
            self.s = np.std(Ymat[:, 0:self.end_index], axis=1)
            # +1 keeps the scale strictly positive (guards zero-variance rows).
            self.s += 1.0
            self.m = np.mean(Ymat[:, 0:self.end_index], axis=1)
            self.Ymat = (Ymat - self.m[:, None]) / self.s[:, None]
            # Shift so the normalized matrix is non-negative.
            self.mini = np.abs(np.min(self.Ymat))
            self.Ymat = self.Ymat + self.mini
        else:
            self.Ymat = Ymat
        n, T = self.Ymat.shape
        t0 = self.end_index + 1
        # Pad one column so slices up to end_index + 1 are valid.
        if t0 > T:
            self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
        if self.svd:
            # Seed X with sampled (scaled) rows and solve F by ridge
            # least-squares against the full matrix.
            indices = np.random.choice(self.Ymat.shape[0], self.rank, replace=False)
            X = self.Ymat[indices, 0:t0]
            mX = np.std(X, axis=1)
            mX[mX == 0] = 1.0
            X = X / mX[:, None]
            Ft = get_model(X.transpose(), self.Ymat[:, 0:t0].transpose(), lamb=0.1)
            F = Ft[0].transpose()
            self.X = torch.from_numpy(X).float()
            self.F = torch.from_numpy(F).float()
        else:
            R = torch.zeros(self.rank, t0).float()
            X = torch.normal(R, 0.1)
            C = torch.zeros(n, self.rank).float()
            F = torch.normal(C, 0.1)
            self.X = X.float()
            self.F = F.float()
        self.D = TCMFDataLoader(
            Ymat=self.Ymat,
            vbsize=self.vbsize,
            hbsize=self.hbsize,
            end_index=self.end_index,
            val_len=val_len,
            shuffle=False,
        )
        logger.info("Initializing Factors")
        self.num_epochs = init_epochs
        self.train_factors(val_len=val_len)

        # Keep the iteration count even so training always ends on a
        # factor-update phase.
        if alt_iters % 2 == 1:
            alt_iters += 1

        logger.info("Starting Alternate Training.....")
        for i in range(1, alt_iters):
            if i % 2 == 0:
                logger.info("Training Factors. Iter#:{}".format(i))
                self.num_epochs = max_FX_epoch
                self.train_factors(
                    seed=False, val_len=val_len,
                    early_stop=True, tenacity=tenacity, mod=mod
                )
            else:
                logger.info("Training Xseq Model. Iter#:{}".format(i))
                self.num_epochs = max_TCN_epoch
                T = np.array(self.X.detach())
                self.train_Xseq(
                    Ymat=T,
                    num_epochs=self.num_epochs,
                    val_len=val_len,
                    early_stop=True,
                    tenacity=tenacity,
                )
        logger.info("Start training Yseq.....")
        val_loss = self.train_Yseq(num_epochs=y_iters,
                                   covariates=covariates,
                                   dti=dti,
                                   val_len=val_len,
                                   num_workers=num_workers,
                                   )
        return val_loss

    def append_new_y(self, Ymat_new, covariates_new=None, dti_new=None):
        """Append new observations to Ymat and extend Yseq's covariates."""
        # Normalize the incremented Ymat the same way as the training data.
        if self.normalize:
            Ymat_new = (Ymat_new - self.m[:, None]) / self.s[:, None]
            Ymat_new = Ymat_new + self.mini
        # Append the new Ymat onto the original; self.end_index equals the
        # number of time steps of the original.
        n, T_added = Ymat_new.shape
        self.Ymat = np.concatenate((self.Ymat[:, : self.end_index], Ymat_new), axis=1)
        self.end_index = self.end_index + T_added
        n, T = self.Ymat.shape
        t0 = self.end_index + 1
        if t0 > T:
            self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
        # Update Yseq.covariates for the newly appended steps.
        last_step = self.end_index - T_added
        new_covariates = self.get_future_time_covs(T_added, last_step,
                                                   future_covariates=covariates_new,
                                                   future_dti=dti_new)
        self.Yseq.covariates = np.hstack([self.Yseq.covariates[:, :last_step], new_covariates])

    def inject_new(self,
                   Ymat_new,
                   covariates_new=None,
                   dti_new=None):
        """Incrementally ingest new observations and extend X to match.

        :raises ValueError: if the new data's series count differs.
        """
        if self.Ymat.shape[0] != Ymat_new.shape[0]:
            raise ValueError("Expected incremental input with {} time series, got {} instead."
                             .format(self.Ymat.shape[0], Ymat_new.shape[0]))
        self.append_new_y(Ymat_new, covariates_new=covariates_new, dti_new=dti_new)
        n, T = self.Ymat.shape
        rank, XT = self.X.shape
        future = T - XT
        Xn = self.recover_future_X(
            last_step=XT,
            future=future,
            num_epochs=100000,
            alpha=0.3,
            vanilla=True,
        )
        self.X = torch.cat([self.X, Xn], dim=1)

    def get_time_covs(self, future_start_date, num_ts, future_covariates, future_dti):
        """Combine generated datetime covariates with user-supplied ones."""
        if self.use_time:
            future_time = TimeCovariates(
                start_date=future_start_date,
                freq=self.freq,
                normalized=True,
                num_ts=num_ts
            )
            if future_dti is not None:
                future_time.dti = future_dti
            time_covariates = future_time.get_covariates()
            if future_covariates is None:
                covariates = time_covariates
            else:
                covariates = np.vstack([time_covariates, future_covariates])
        else:
            covariates = future_covariates
        return covariates

    def get_future_time_covs(self, horizon, last_step, future_covariates, future_dti):
        """Covariates for ``horizon`` steps starting right after ``last_step``."""
        # Normalize bare alphabetic freq strings ("H") to "1H" for Timedelta.
        if self.freq[0].isalpha():
            freq = "1" + self.freq
        else:
            freq = self.freq
        future_start_date = pd.Timestamp(self.start_date) + pd.Timedelta(freq) * last_step
        covs_future = self.get_time_covs(future_start_date=future_start_date,
                                         num_ts=horizon,
                                         future_covariates=future_covariates,
                                         future_dti=future_dti)
        return covs_future

    def get_prediction_time_covs(self, rg, horizon, last_step, future_covariates, future_dti):
        """Past covariates (rg steps) concatenated with future ones (horizon)."""
        covs_past = self.Yseq.covariates[:, last_step - rg: last_step]
        covs_future = self.get_future_time_covs(horizon, last_step, future_covariates, future_dti)
        covs = np.concatenate([covs_past, covs_future], axis=1)
        return covs

    def predict_horizon(
        self,
        ind=None,
        future=10,
        future_covariates=None,
        future_dti=None,
        bsize=90,
        num_workers=1,
    ):
        """Forecast ``future`` steps past the end of the known data.

        Uses the global reconstruction as covariates for the Yseq network.
        Un-normalizes the output when the model was trained normalized.
        """
        last_step = self.end_index
        if ind is None:
            ind = np.arange(self.Ymat.shape[0])
        self.Yseq.seq = self.Yseq.seq.eval()
        self.Xseq = self.Xseq.eval()
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        covs = self.get_prediction_time_covs(rg, future, last_step, future_covariates, future_dti)
        yc = self.predict_global(
            ind=ind,
            last_step=last_step,
            future=future,
            normalize=False,
            bsize=bsize,
        )
        if self.period is None:
            ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
        else:
            ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
            period = self.period
            while last_step + future - (period - 1) > last_step + 1:
                period += self.period
            # The last coordinate is not used.
            ycovs[:, 1, period - 1::] = self.Ymat[
                :, last_step - rg: last_step + future - (period - 1)]
        Y = self.Yseq.predict_future(
            data_in=self.Ymat[ind, last_step - rg: last_step],
            covariates=covs,
            ycovs=ycovs,
            future=future,
            bsize=bsize,
            normalize=False,
            num_workers=num_workers,
        )
        if self.normalize:
            Y = Y - self.mini
            Y = Y * self.s[ind, None] + self.m[ind, None]
            return Y
        else:
            return Y

    def predict(
        self, ind=None, last_step=100, future=10, normalize=False, bsize=90
    ):
        """Forecast ``future`` steps starting at an arbitrary ``last_step``.

        Like predict_horizon, but covariates are read from the already
        materialized Yseq.covariates and un-normalization is caller-controlled.
        """
        if ind is None:
            ind = np.arange(self.Ymat.shape[0])
        # Fix: removed no-op `self.Xseq = self.Xseq`.
        self.Yseq.seq = self.Yseq.seq.eval()
        self.Xseq = self.Xseq.eval()
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        covs = self.Yseq.covariates[:, last_step - rg: last_step + future]
        yc = self.predict_global(
            ind=ind,
            last_step=last_step,
            future=future,
            normalize=False,
            bsize=bsize,
        )
        if self.period is None:
            ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
        else:
            ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
            period = self.period
            while last_step + future - (period - 1) > last_step + 1:
                period += self.period
            # This seems like we are looking ahead, but it will not use the
            # last coordinate, which is the only new point added.
            ycovs[:, 1, period - 1::] = self.Ymat[
                :, last_step - rg: last_step + future - (period - 1)]
        Y = self.Yseq.predict_future(
            data_in=self.Ymat[ind, last_step - rg: last_step],
            covariates=covs,
            ycovs=ycovs,
            future=future,
            bsize=bsize,
            normalize=False,
        )
        if normalize:
            Y = Y - self.mini
            Y = Y * self.s[ind, None] + self.m[ind, None]
            return Y
        else:
            return Y

    def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, alpha=0.3):
        """Rolling-origin evaluation over ``n`` windows of length ``tau``.

        After each window, X is extended via recover_future_X and the origin
        advances by tau.  Restores self.X / self.end_index before returning.

        NOTE(review): wape/mape/smape are assumed to be in scope at module
        level — they are not among this file's visible imports; verify.

        :return: dict of error metrics for the local (Yseq) and global
            (factor) predictions, plus a naive seasonal baseline.
        """
        prevX = self.X.clone()
        prev_index = self.end_index
        out = self.predict(
            last_step=self.end_index,
            future=tau,
            bsize=bsize,
            normalize=self.normalize,
        )
        out_global = self.predict_global(
            np.arange(self.Ymat.shape[0]),
            last_step=self.end_index,
            future=tau,
            normalize=self.normalize,
            bsize=bsize,
        )
        predicted_values = []
        actual_values = []
        predicted_values_global = []
        S = out[:, -tau::]
        S_g = out_global[:, -tau::]
        predicted_values += [S]
        predicted_values_global += [S_g]
        R = Ymat[:, self.end_index: self.end_index + tau]
        actual_values += [R]
        print("Current window wape:{}".format(wape(S, R)))
        self.Xseq = self.Xseq.eval()
        self.Yseq.seq = self.Yseq.seq.eval()
        for i in range(n - 1):
            Xn = self.recover_future_X(
                last_step=self.end_index + 1,
                future=tau,
                num_epochs=100000,
                alpha=alpha,
                vanilla=True
            )
            self.X = torch.cat([self.X, Xn], dim=1)
            self.end_index += tau
            out = self.predict(
                last_step=self.end_index,
                future=tau,
                bsize=bsize,
                normalize=self.normalize,
            )
            out_global = self.predict_global(
                np.arange(self.Ymat.shape[0]),
                last_step=self.end_index,
                future=tau,
                normalize=self.normalize,
                bsize=bsize,
            )
            S = out[:, -tau::]
            S_g = out_global[:, -tau::]
            predicted_values += [S]
            predicted_values_global += [S_g]
            R = Ymat[:, self.end_index: self.end_index + tau]
            actual_values += [R]
            print("Current window wape:{}".format(wape(S, R)))
        predicted = np.hstack(predicted_values)
        predicted_global = np.hstack(predicted_values_global)
        actual = np.hstack(actual_values)
        dic = {}
        dic["wape"] = wape(predicted, actual)
        dic["mape"] = mape(predicted, actual)
        dic["smape"] = smape(predicted, actual)
        dic["mae"] = np.abs(predicted - actual).mean()
        dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
        dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        dic["wape_global"] = wape(predicted_global, actual)
        dic["mape_global"] = mape(predicted_global, actual)
        dic["smape_global"] = smape(predicted_global, actual)
        dic["mae_global"] = np.abs(predicted_global - actual).mean()
        dic["rmse_global"] = np.sqrt(((predicted_global - actual) ** 2).mean())
        dic["nrmse_global"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        # Naive seasonal baseline: the previous n windows shifted by tau.
        baseline = Ymat[:, Ymat.shape[1] - n * tau - tau: Ymat.shape[1] - tau]
        dic["baseline_wape"] = wape(baseline, actual)
        dic["baseline_mape"] = mape(baseline, actual)
        dic["baseline_smape"] = smape(baseline, actual)
        self.X = prevX
        self.end_index = prev_index
        return dic
| apache-2.0 |
admcrae/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 25 | 33691 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import warnings
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
  """Holds a list of callbacks and fans training events out to them.

  Also keeps running statistics over callback execution time and warns
  when the callbacks are slow relative to the batch updates themselves.

  Arguments:
      callbacks: List of `Callback` instances.
      queue_length: Queue length for keeping
          running statistics over callback execution time.
  """

  def __init__(self, callbacks=None, queue_length=10):
    self.callbacks = list(callbacks) if callbacks else []
    self.queue_length = queue_length

  def append(self, callback):
    self.callbacks.append(callback)

  def set_params(self, params):
    for cb in self.callbacks:
      cb.set_params(params)

  def set_model(self, model):
    for cb in self.callbacks:
      cb.set_model(model)

  def on_epoch_begin(self, epoch, logs=None):
    """Called at the start of an epoch.

    Arguments:
        epoch: integer, index of epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_epoch_begin(epoch, logs)
    # Reset the batch-timing statistics for the new epoch.
    self._delta_t_batch = 0.
    self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
    self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

  def on_epoch_end(self, epoch, logs=None):
    """Called at the end of an epoch.

    Arguments:
        epoch: integer, index of epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_epoch_end(epoch, logs)

  def on_batch_begin(self, batch, logs=None):
    """Called right before processing a batch.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    start = time.time()
    for cb in self.callbacks:
      cb.on_batch_begin(batch, logs)
    self._delta_ts_batch_begin.append(time.time() - start)
    median_delta = np.median(self._delta_ts_batch_begin)
    slow = (self._delta_t_batch > 0. and
            median_delta > 0.95 * self._delta_t_batch and median_delta > 0.1)
    if slow:
      warnings.warn(
          'Method on_batch_begin() is slow compared '
          'to the batch update (%f). Check your callbacks.' % median_delta)
    self._t_enter_batch = time.time()

  def on_batch_end(self, batch, logs=None):
    """Called at the end of a batch.

    Arguments:
        batch: integer, index of batch within the current epoch.
        logs: dictionary of logs.
    """
    logs = logs or {}
    # on_batch_begin may not have run (e.g. the first callback call).
    if not hasattr(self, '_t_enter_batch'):
      self._t_enter_batch = time.time()
    self._delta_t_batch = time.time() - self._t_enter_batch
    start = time.time()
    for cb in self.callbacks:
      cb.on_batch_end(batch, logs)
    self._delta_ts_batch_end.append(time.time() - start)
    median_delta = np.median(self._delta_ts_batch_end)
    slow = (self._delta_t_batch > 0. and
            (median_delta > 0.95 * self._delta_t_batch and median_delta > 0.1))
    if slow:
      warnings.warn(
          'Method on_batch_end() is slow compared '
          'to the batch update (%f). Check your callbacks.' % median_delta)

  def on_train_begin(self, logs=None):
    """Called at the beginning of training.

    Arguments:
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_train_begin(logs)

  def on_train_end(self, logs=None):
    """Called at the end of training.

    Arguments:
        logs: dictionary of logs.
    """
    logs = logs or {}
    for cb in self.callbacks:
      cb.on_train_end(logs)

  def __iter__(self):
    return iter(self.callbacks)
class Callback(object):
  """Abstract base class used to build new callbacks.

  # Properties
      params: dict. Training parameters
          (eg. verbosity, batch size, number of epochs...).
      model: instance of `keras.models.Model`.
          Reference of the model being trained.

  The `logs` dictionary that callback methods take as argument will
  contain keys for quantities relevant to the current batch or epoch.

  Currently, the `.fit()` method of the `Sequential` model class
  will include the following quantities in the `logs` that
  it passes to its callbacks:

      on_epoch_end: logs include `acc` and `loss`, and
          optionally include `val_loss`
          (if validation is enabled in `fit`), and `val_acc`
          (if validation and accuracy monitoring are enabled).
      on_batch_begin: logs include `size`,
          the number of samples in the current batch.
      on_batch_end: logs include `loss`, and optionally `acc`
          (if accuracy monitoring is enabled).

  Subclasses override the hook methods they care about; every hook is a
  no-op by default.
  """

  def __init__(self):
    self.validation_data = None

  def set_params(self, params):
    self.params = params

  def set_model(self, model):
    self.model = model

  def on_epoch_begin(self, epoch, logs=None):
    pass

  def on_epoch_end(self, epoch, logs=None):
    pass

  def on_batch_begin(self, batch, logs=None):
    pass

  def on_batch_end(self, batch, logs=None):
    pass

  def on_train_begin(self, logs=None):
    pass

  def on_train_end(self, logs=None):
    pass
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  This callback is automatically applied to every Keras model.
  """

  def on_epoch_begin(self, epoch, logs=None):
    # Reset the running sums at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    self.seen += batch_size
    # Accumulate a batch-size-weighted sum for every logged quantity.
    for key, value in logs.items():
      weighted = value * batch_size
      if key in self.totals:
        self.totals[key] += weighted
      else:
        self.totals[key] = weighted

  def on_epoch_end(self, epoch, logs=None):
    if logs is not None:
      for key in self.params['metrics']:
        if key in self.totals:
          # Make the epoch average available to the callbacks after us.
          logs[key] = self.totals[key] / self.seen
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout via a progress bar.

  Arguments:
      count_mode: One of "steps" or "samples".
          Whether the progress bar should
          count samples seen or steps (batches) seen.

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples'):
    super(ProgbarLogger, self).__init__()
    if count_mode not in ('samples', 'steps'):
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.use_steps = (count_mode == 'steps')

  def on_train_begin(self, logs=None):
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']

  def on_epoch_begin(self, epoch, logs=None):
    if self.verbose:
      print('Epoch %d/%d' % (epoch + 1, self.epochs))
      # Progress target: batches when counting steps, samples otherwise.
      key = 'steps' if self.use_steps else 'samples'
      self.target = self.params[key]
      self.progbar = Progbar(target=self.target, verbose=self.verbose)
    self.seen = 0

  def on_batch_begin(self, batch, logs=None):
    # Start collecting metric values for the incoming batch.
    if self.seen < self.target:
      self.log_values = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    self.seen += 1 if self.use_steps else batch_size
    for metric in self.params['metrics']:
      if metric in logs:
        self.log_values.append((metric, logs[metric]))
    # Skip progbar update for the last batch;
    # will be handled by on_epoch_end.
    if self.verbose and self.seen < self.target:
      self.progbar.update(self.seen, self.log_values)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for metric in self.params['metrics']:
      if metric in logs:
        self.log_values.append((metric, logs[metric]))
    if self.verbose:
      self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
    """Callback that records events into a `History` object.
    This callback is automatically applied to
    every Keras model. The `History` object
    gets returned by the `fit` method of models.
    """

    def on_train_begin(self, logs=None):
        self.epoch = []
        self.history = {}

    def on_epoch_end(self, epoch, logs=None):
        self.epoch.append(epoch)
        # Append each logged quantity to its per-metric series.
        for key, value in (logs or {}).items():
            self.history.setdefault(key, []).append(value)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
# Counts epochs since the last checkpoint; compared against `period`.
self.epochs_since_last_save = 0
# Unknown modes are tolerated with a warning and treated as 'auto'.
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
# 'auto': metrics whose name contains 'acc' or starts with
# 'fmeasure' are maximized; everything else is minimized.
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
# Only checkpoint every `period` epochs.
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
# `logs` keys (e.g. val_loss) are usable in the filepath template.
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
# Monitored quantity missing from logs: skip this checkpoint.
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
# Unconditional save path (save_best_only=False).
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
    """Stop training when a monitored quantity has stopped improving.
    Arguments:
        monitor: quantity to be monitored.
        min_delta: minimum change in the monitored quantity
            to qualify as an improvement, i.e. an absolute
            change of less than min_delta, will count as no
            improvement.
        patience: number of epochs with no improvement
            after which training will be stopped.
        verbose: verbosity mode.
        mode: one of {auto, min, max}. In `min` mode,
            training will stop when the quantity
            monitored has stopped decreasing; in `max`
            mode it will stop when the quantity
            monitored has stopped increasing; in `auto`
            mode, the direction is automatically inferred
            from the name of the monitored quantity.
    """

    def __init__(self,
                 monitor='val_loss',
                 min_delta=0,
                 patience=0,
                 verbose=0,
                 mode='auto'):
        super(EarlyStopping, self).__init__()

        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.min_delta = min_delta
        self.wait = 0
        self.stopped_epoch = 0

        if mode not in ['auto', 'min', 'max']:
            # Bug fix: the original formatted `self.mode`, an attribute that
            # is never assigned on this class, so reaching this branch raised
            # AttributeError instead of warning. Use the local `mode`.
            warnings.warn('EarlyStopping mode %s is unknown, '
                          'fallback to auto mode.' % (mode), RuntimeWarning)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
        elif mode == 'max':
            self.monitor_op = np.greater
        else:
            # 'auto': infer direction from the metric name, matching the
            # convention used by ModelCheckpoint above.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
            else:
                self.monitor_op = np.less

        # Sign min_delta so `current - self.min_delta` always shifts the
        # comparison in the "harder to improve" direction.
        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

    def on_train_begin(self, logs=None):
        self.wait = 0  # Allow instances to be re-used
        self.best = np.Inf if self.monitor_op == np.less else -np.Inf

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}  # Robustness: tolerate a missing logs dict.
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Early stopping requires %s available!' % (self.monitor),
                          RuntimeWarning)
            # Bug fix: the original fell through and evaluated
            # `current - self.min_delta` with current=None, raising TypeError.
            return

        if self.monitor_op(current - self.min_delta, self.best):
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                # Signal the training loop to stop.
                self.model.stop_training = True
            self.wait += 1

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0 and self.verbose > 0:
            print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
    """Callback used to stream events to a server.
    Requires the `requests` library.
    Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
    HTTP POST, with a `data` argument which is a
    JSON-encoded dictionary of event data.
    Arguments:
        root: String; root url of the target server.
        path: String; path relative to `root` to which the events will be sent.
        field: String; JSON field under which the data will be stored.
        headers: Dictionary; optional custom HTTP headers.
            Defaults to:
            `{'Accept': 'application/json',
              'Content-Type': 'application/json'}`
    """

    def __init__(self,
                 root='http://localhost:9000',
                 path='/publish/epoch/end/',
                 field='data',
                 headers=None):
        super(RemoteMonitor, self).__init__()
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers if headers is not None else {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

    def on_epoch_end(self, epoch, logs=None):
        if requests is None:
            raise ImportError('RemoteMonitor requires the `requests` library.')
        payload = {'epoch': epoch}
        for key, value in (logs or {}).items():
            payload[key] = value
        try:
            requests.post(
                self.root + self.path, {self.field: json.dumps(payload)},
                headers=self.headers)
        except requests.exceptions.RequestException:
            # Best-effort delivery: a network failure only warns.
            warnings.warn('Warning: could not reach RemoteMonitor '
                          'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
    """Learning rate scheduler.
    Arguments:
        schedule: a function that takes an epoch index as input
            (integer, indexed from 0) and returns a new
            learning rate as output (float).
    """

    def __init__(self, schedule):
        super(LearningRateScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        new_lr = self.schedule(epoch)
        # The schedule must produce a plain (or numpy) float.
        if not isinstance(new_lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, new_lr)
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by Tensorboard.
histogram_freq: frequency (in epochs) at which to compute activation
histograms for the layers of the model. If set to 0,
histograms won't be computed.
write_graph: whether to visualize the graph in Tensorboard.
The log file can become quite large when
write_graph is set to True.
write_images: whether to write model weights to visualize as
image in Tensorboard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep eye on. If
None or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
# `merged` is built lazily in set_model once the layers are known.
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
def set_model(self, model):
# Builds all summary ops, the FileWriter and (optionally) the
# embedding projector config for the given model.
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
# One histogram summary per weight tensor, plus optional image
# summaries of the weight matrices.
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = w_img.get_shape()
# Orient 2-D weights landscape; lift 1-D weights to 2-D.
if len(shape) > 1 and shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
if len(shape) == 1:
w_img = array_ops.expand_dims(w_img, 0)
# Add batch and channel dims expected by image summaries.
w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
if self.embeddings_freq:
self.saver = saver_lib.Saver()
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
# Default: watch every layer whose class name is 'Embedding'.
embeddings_layer_names = [
layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
embeddings = {
layer.name: layer.weights[0]
for layer in self.model.layers if layer.name in embeddings_layer_names
}
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
# A single string applies the same metadata file to all layers.
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()
}
config = projector.ProjectorConfig()
self.embeddings_logs = []
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
self.embeddings_logs.append(
os.path.join(self.log_dir, layer_name + '.ckpt'))
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
# TODO(fchollet): implement batched calls to sess.run
# (current call will likely go OOM on GPU)
if self.model.uses_learning_phase:
# Append learning_phase=0 (test mode) to the feed.
cut_v_data = len(self.model.inputs)
val_data = self.validation_data[:cut_v_data] + [0]
tensors = self.model.inputs + [K.learning_phase()]
else:
val_data = self.validation_data
tensors = self.model.inputs
feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
if self.embeddings_freq and self.embeddings_logs:
if epoch % self.embeddings_freq == 0:
for log in self.embeddings_logs:
self.saver.save(self.sess, log, epoch)
# Write every scalar log value except batch bookkeeping keys.
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
# A factor >= 1 would never shrink the learning rate.
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0  # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode), RuntimeWarning)
self.mode = 'auto'
# Comparisons use an epsilon margin so only significant changes count.
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
# Small slack when comparing the current lr against min_lr below.
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
# Expose the current lr in the logs for other callbacks (e.g. CSVLogger).
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
self.monitor, RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
# No improvement outside cooldown: after `patience` epochs,
# shrink lr by `factor`, bounded below by min_lr.
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
    """Callback that streams epoch results to a csv file.
    Supports all values that can be represented as a string,
    including 1D iterables such as np.ndarray.
    Example:
        ```python
        csv_logger = CSVLogger('training.log')
        model.fit(X_train, Y_train, callbacks=[csv_logger])
        ```
    Arguments:
        filename: filename of the csv file, e.g. 'run/log.csv'.
        separator: string used to separate elements in the csv file.
        append: True: append if file exists (useful for continuing
            training). False: overwrite existing file,
    """

    def __init__(self, filename, separator=',', append=False):
        self.sep = separator
        self.filename = filename
        self.append = append
        self.writer = None
        self.keys = None
        self.append_header = True
        super(CSVLogger, self).__init__()

    def on_train_begin(self, logs=None):
        if self.append:
            # Only re-emit the header when appending to an empty file.
            if os.path.exists(self.filename):
                with open(self.filename) as f:
                    self.append_header = not bool(len(f.readline()))
            file_mode = 'a'
        else:
            file_mode = 'w'
        self.csv_file = open(self.filename, file_mode)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        def handle_value(k):
            # 0-d ndarrays are iterable-like but should be written as scalars.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, Iterable) and not is_zero_dim_ndarray:
                return '"[%s]"' % (', '.join(map(str, k)))
            return k

        if not self.writer:
            # Lock in the column order from the first epoch's log keys.
            self.keys = sorted(logs.keys())

            class CustomDialect(csv.excel):
                delimiter = self.sep

            self.writer = csv.DictWriter(
                self.csv_file,
                fieldnames=['epoch'] + self.keys,
                dialect=CustomDialect)
            if self.append_header:
                self.writer.writeheader()

        row = OrderedDict({'epoch': epoch})
        row.update((key, handle_value(logs[key])) for key in self.keys)
        self.writer.writerow(row)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
class LambdaCallback(Callback):
    """Callback for creating simple, custom callbacks on-the-fly.
    This callback is constructed with anonymous functions that will be called
    at the appropriate time. Note that the callbacks expects positional
    arguments, as:
    - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
    `epoch`, `logs`
    - `on_batch_begin` and `on_batch_end` expect two positional arguments:
    `batch`, `logs`
    - `on_train_begin` and `on_train_end` expect one positional argument:
    `logs`
    Arguments:
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_batch_begin: called at the beginning of every batch.
        on_batch_end: called at the end of every batch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.
    Example:
    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch,logs: print(batch))
    # Plot the loss after every epoch.
    import numpy as np
    import matplotlib.pyplot as plt
    plot_loss_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
                                                  logs['loss']))
    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])
    model.fit(...,
              callbacks=[batch_print_callback,
                         plot_loss_callback,
                         cleanup_callback])
    ```
    """

    def __init__(self,
                 on_epoch_begin=None,
                 on_epoch_end=None,
                 on_batch_begin=None,
                 on_batch_end=None,
                 on_train_begin=None,
                 on_train_end=None,
                 **kwargs):
        super(LambdaCallback, self).__init__()
        self.__dict__.update(kwargs)
        # Every hook left unspecified falls back to a no-op with the
        # matching arity.
        self.on_epoch_begin = (on_epoch_begin if on_epoch_begin is not None
                               else (lambda epoch, logs: None))
        self.on_epoch_end = (on_epoch_end if on_epoch_end is not None
                             else (lambda epoch, logs: None))
        self.on_batch_begin = (on_batch_begin if on_batch_begin is not None
                               else (lambda batch, logs: None))
        self.on_batch_end = (on_batch_end if on_batch_end is not None
                             else (lambda batch, logs: None))
        self.on_train_begin = (on_train_begin if on_train_begin is not None
                               else (lambda logs: None))
        self.on_train_end = (on_train_end if on_train_end is not None
                             else (lambda logs: None))
| apache-2.0 |
jmmease/pandas | pandas/tests/io/json/test_pandas.py | 1 | 42616 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
# Shared fixture frames reused by the test class below.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()

_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame({k: v.astype(np.int64)
                       for k, v in compat.iteritems(_seriesd)})
_tsframe = DataFrame(_tsd)

_cat_frame = _frame.copy()
# Five rows each of 'bah'/'bar'/'baz'; every remaining row is 'foo'.
cat = (['bah'] * 5 + ['bar'] * 5 + ['baz'] * 5 +
       ['foo'] * (len(_cat_frame) - 15))
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')

_mixed_frame = _frame.copy()
class TestPandasContainer(object):
def setup_method(self, method):
    """Build fresh per-test fixtures so tests cannot mutate shared state."""
    self.dirpath = tm.get_data_path()

    self.ts = tm.makeTimeSeries()
    self.ts.name = 'ts'

    self.series = tm.makeStringSeries()
    self.series.name = 'series'

    self.objSeries = tm.makeObjectSeries()
    self.objSeries.name = 'objects'

    self.empty_series = Series([], index=[])
    self.empty_frame = DataFrame({})

    # Copies of the module-level frames, one attribute per fixture.
    for attr, source in (('frame', _frame),
                         ('frame2', _frame2),
                         ('intframe', _intframe),
                         ('tsframe', _tsframe),
                         ('mixed_frame', _mixed_frame),
                         ('categorical', _cat_frame)):
        setattr(self, attr, source.copy())
def teardown_method(self, method):
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
# Labels containing quotes, slashes and backslashes must survive a
# to_json/read_json round trip for every orient.
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
# orient='records' drops the index, so only columns/values are compared.
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
# A duplicated index is not representable as JSON object keys, so
# 'index' and 'columns' orients must raise; the others round-trip.
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
# Duplicated column labels cannot be JSON object keys, so 'index',
# 'columns' and 'records' orients must raise.
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([['a', 'b'], ['c', 'd']], index=[
1, 2], columns=['x', 'y'])
result = read_json(df.to_json(orient='split'), orient='split')
assert_frame_equal(result, df)
def _check(df):
# Round-trip through 'split' with date conversion on column 'x'.
result = read_json(df.to_json(orient='split'), orient='split',
convert_dates=['x'])
assert_frame_equal(result, df)
# Exercise several value dtypes, including Timestamps.
for o in [[['a', 'b'], ['c', 'd']],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]]]:
_check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
# Exhaustive to_json/read_json round-trip check across every orient,
# with and without numpy decoding and axis conversion.
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
# Sort first so the comparison is order-insensitive.
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
# Some combinations are expected to fail; `raise_ok` whitelists
# the acceptable exception type.
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
# Without axis conversion, datetime indexes come back as raw
# epoch values; rebuild the DatetimeIndex for comparison.
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
# Malformed JSON must raise, not silently parse.
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assert_raises_regex(ValueError,
r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
    """Missing cells round-trip as NaN (default) or None (dtype=False),
    and +/-inf serialise to null and come back as NaN."""
    df = DataFrame([[1, 2], [4, 5, 6]])
    unser = read_json(df.to_json())
    assert np.isnan(unser[2][0])
    df = DataFrame([['1', '2'], ['4', '5', '6']])
    unser = read_json(df.to_json())
    assert np.isnan(unser[2][0])
    unser = read_json(df.to_json(), dtype=False)
    assert unser[2][0] is None
    unser = read_json(df.to_json(), convert_axes=False, dtype=False)
    assert unser['2']['0'] is None
    unser = read_json(df.to_json(), numpy=False)
    assert np.isnan(unser[2][0])
    unser = read_json(df.to_json(), numpy=False, dtype=False)
    assert unser[2][0] is None
    unser = read_json(df.to_json(), numpy=False,
                      convert_axes=False, dtype=False)
    assert unser['2']['0'] is None
    # infinities get mapped to nulls which get mapped to NaNs during
    # deserialisation
    df = DataFrame([[1, 2], [4, 5, 6]])
    df.loc[0, 2] = np.inf
    unser = read_json(df.to_json())
    assert np.isnan(unser[2][0])
    unser = read_json(df.to_json(), dtype=False)
    assert np.isnan(unser[2][0])
    df.loc[0, 2] = np.NINF
    unser = read_json(df.to_json())
    assert np.isnan(unser[2][0])
    unser = read_json(df.to_json(), dtype=False)
    assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
                    reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
    """double_precision controls rounding of floats in the JSON output."""
    df = pd.DataFrame([dict(a_float=0.95)])
    encoded = df.to_json(double_precision=1)
    assert encoded == '{"a_float":{"0":1.0}}'
    df = pd.DataFrame([dict(a_float=1.95)])
    encoded = df.to_json(double_precision=1)
    assert encoded == '{"a_float":{"0":2.0}}'
    df = pd.DataFrame([dict(a_float=-1.95)])
    encoded = df.to_json(double_precision=1)
    assert encoded == '{"a_float":{"0":-2.0}}'
    df = pd.DataFrame([dict(a_float=0.995)])
    encoded = df.to_json(double_precision=2)
    assert encoded == '{"a_float":{"0":1.0}}'
    df = pd.DataFrame([dict(a_float=0.9995)])
    encoded = df.to_json(double_precision=3)
    assert encoded == '{"a_float":{"0":1.0}}'
    df = pd.DataFrame([dict(a_float=0.99999999999999944)])
    encoded = df.to_json(double_precision=15)
    assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
    """An unrecognised orient passed to DataFrame.to_json raises ValueError."""
    frame = DataFrame([1, 2, 3])
    pytest.raises(ValueError, frame.to_json, orient="garbage")
def test_frame_empty(self):
    """An empty (single-dtype) frame round-trips, and an empty column
    serialises to an empty JSON object (GH 7445)."""
    df = DataFrame(columns=['jim', 'joe'])
    assert not df._is_mixed_type
    assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                       check_index_type=False)
    # GH 7445
    result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
    expected = '{"test":{}}'
    assert result == expected
def test_frame_empty_mixedtype(self):
    """An empty frame with mixed dtypes also round-trips through JSON."""
    # mixed type
    df = DataFrame(columns=['jim', 'joe'])
    df['joe'] = df['joe'].astype('i8')
    assert df._is_mixed_type
    assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                       check_index_type=False)
def test_frame_mixedtype_orient(self):  # GH10289
    """Mixed-dtype frames round-trip under every orient; 'records' drops
    the index and 'values' additionally drops the columns."""
    vals = [[10, 1, 'foo', .1, .01],
            [20, 2, 'bar', .2, .02],
            [30, 3, 'baz', .3, .03],
            [40, 4, 'qux', .4, .04]]
    df = DataFrame(vals, index=list('abcd'),
                   columns=['1st', '2nd', '3rd', '4th', '5th'])
    assert df._is_mixed_type
    right = df.copy()
    for orient in ['split', 'index', 'columns']:
        inp = df.to_json(orient=orient)
        left = read_json(inp, orient=orient, convert_axes=False)
        assert_frame_equal(left, right)
    # 'records' loses the index, so expect a default RangeIndex
    right.index = np.arange(len(df))
    inp = df.to_json(orient='records')
    left = read_json(inp, orient='records', convert_axes=False)
    assert_frame_equal(left, right)
    # 'values' additionally loses the column labels
    right.columns = np.arange(df.shape[1])
    inp = df.to_json(orient='values')
    left = read_json(inp, orient='values', convert_axes=False)
    assert_frame_equal(left, right)
def test_v12_compat(self):
    """JSON files written by pandas 0.12 still deserialise to the expected
    frames (epoch and ISO date formats, with NaT entries)."""
    df = DataFrame(
        [[1.56808523, 0.65727391, 1.81021139, -0.17251653],
         [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
         [1.51493992, 0.11805825, 1.629455, -1.31506612],
         [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
         [0.05951614, -2.69652057, 1.28163262, 0.34703478]],
        columns=['A', 'B', 'C', 'D'],
        index=pd.date_range('2000-01-03', '2000-01-07'))
    df['date'] = pd.Timestamp('19920106 18:21:32.12')
    df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
    df['modified'] = df['date']
    df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
    v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
    df_unser = pd.read_json(v12_json)
    assert_frame_equal(df, df_unser)
    df_iso = df.drop(['modified'], axis=1)
    v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
    df_unser_iso = pd.read_json(v12_iso_json)
    assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
    """orient='split' round-trips a many-block mixed frame exactly,
    compared block-by-block (GH 9037)."""
    index = pd.date_range('20000101', periods=10, freq='H')
    df_mixed = DataFrame(OrderedDict(
        float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
                 -0.60316077, 0.24653374, 0.28668979, -2.51969012,
                 0.95748401, -1.02970536],
        int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
               40314334, 21290235, 4991321, 41903419, 16008365],
        str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
               'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
        float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
                 -0.48217572, 0.86229683, 1.08935819, 0.93898739,
                 -0.03030452, 1.43366348],
        str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
               '08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
        int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
               34193846, 10561746, 24867120, 76131025]
    ), index=index)
    # JSON deserialisation always creates unicode strings
    df_mixed.columns = df_mixed.columns.astype('unicode')
    df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
                                orient='split')
    assert_frame_equal(df_mixed, df_roundtrip,
                       check_index_type=True,
                       check_column_type=True,
                       check_frame_type=True,
                       by_blocks=True,
                       check_exact=True)
def test_series_non_unique_index(self):
    """A Series with duplicate index labels cannot use orient='index',
    but still round-trips via 'split' and 'records'."""
    s = Series(['a', 'b'], index=[1, 1])
    pytest.raises(ValueError, s.to_json, orient='index')
    assert_series_equal(s, read_json(s.to_json(orient='split'),
                                     orient='split', typ='series'))
    unser = read_json(s.to_json(orient='records'),
                      orient='records', typ='series')
    tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
    """Exhaustive Series round-trip matrix over orients, numpy flag and
    dtypes, for several fixture series."""

    def _check_orient(series, orient, dtype=None, numpy=False,
                      check_index_type=True):
        # Round-trip one series under one orient; 'records'/'values'
        # drop the index, so only values can be compared there.
        series = series.sort_index()
        unser = read_json(series.to_json(orient=orient),
                          typ='series', orient=orient, numpy=numpy,
                          dtype=dtype)
        unser = unser.sort_index()
        if orient == "records" or orient == "values":
            assert_almost_equal(series.values, unser.values)
        else:
            if orient == "split":
                assert_series_equal(series, unser,
                                    check_index_type=check_index_type)
            else:
                # other orients do not preserve the name attribute
                assert_series_equal(series, unser, check_names=False,
                                    check_index_type=check_index_type)

    def _check_all_orients(series, dtype=None, check_index_type=True):
        # Run every orient with and without the numpy fast path.
        _check_orient(series, "columns", dtype=dtype,
                      check_index_type=check_index_type)
        _check_orient(series, "records", dtype=dtype,
                      check_index_type=check_index_type)
        _check_orient(series, "split", dtype=dtype,
                      check_index_type=check_index_type)
        _check_orient(series, "index", dtype=dtype,
                      check_index_type=check_index_type)
        _check_orient(series, "values", dtype=dtype)
        _check_orient(series, "columns", dtype=dtype, numpy=True,
                      check_index_type=check_index_type)
        _check_orient(series, "records", dtype=dtype, numpy=True,
                      check_index_type=check_index_type)
        _check_orient(series, "split", dtype=dtype, numpy=True,
                      check_index_type=check_index_type)
        _check_orient(series, "index", dtype=dtype, numpy=True,
                      check_index_type=check_index_type)
        _check_orient(series, "values", dtype=dtype, numpy=True,
                      check_index_type=check_index_type)

    # basic
    _check_all_orients(self.series)
    assert self.series.to_json() == self.series.to_json(orient="index")
    objSeries = Series([str(d) for d in self.objSeries],
                       index=self.objSeries.index,
                       name=self.objSeries.name)
    _check_all_orients(objSeries, dtype=False)
    # empty_series has empty index with object dtype
    # which cannot be revert
    assert self.empty_series.index.dtype == np.object_
    _check_all_orients(self.empty_series, check_index_type=False)
    _check_all_orients(self.ts)
    # dtype
    s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
    _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
    _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
    """An unrecognised orient passed to Series.to_json raises ValueError."""
    ser = Series([1, 2, 3])
    pytest.raises(ValueError, ser.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
    """precise_float=True parses floats exactly for a Series round-trip."""
    s = Series([4.56, 4.56, 4.56])
    result = read_json(s.to_json(), typ='series', precise_float=True)
    assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
    """precise_float=True parses floats exactly for a DataFrame round-trip."""
    df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
    result = read_json(df.to_json(), precise_float=True)
    assert_frame_equal(result, df, check_index_type=False,
                       check_column_type=False)
def test_typ(self):
    """typ=None lets read_json infer that the payload is a Series."""
    s = Series(lrange(6), index=['a', 'b', 'c',
                                 'd', 'e', 'f'], dtype='int64')
    result = read_json(s.to_json(), typ=None)
    assert_series_equal(result, s)
def test_reconstruction_index(self):
    """Both default and string indices are reconstructed on round-trip."""
    df = DataFrame([[1, 2, 3], [4, 5, 6]])
    result = read_json(df.to_json())
    assert_frame_equal(result, df)
    df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
    result = read_json(df.to_json())
    assert_frame_equal(result, df)
def test_path(self):
    """to_json/read_json accept a filesystem path for every fixture frame."""
    with ensure_clean('test.json') as path:
        for df in [self.frame, self.frame2, self.intframe, self.tsframe,
                   self.mixed_frame]:
            df.to_json(path)
            read_json(path)
def test_axis_dates(self):
    """Datetime axes round-trip for frames and series; the series name is
    not preserved through JSON."""
    # frame
    json = self.tsframe.to_json()
    result = read_json(json)
    assert_frame_equal(result, self.tsframe)
    # series
    json = self.ts.to_json()
    result = read_json(json, typ='series')
    assert_series_equal(result, self.ts, check_names=False)
    assert result.name is None
def test_convert_dates(self):
    """Datetime columns are converted on read by default; with
    convert_dates=False they come back as raw epoch integers."""
    # frame
    df = self.tsframe.copy()
    df['date'] = Timestamp('20130101')
    json = df.to_json()
    result = read_json(json)
    assert_frame_equal(result, df)
    df['foo'] = 1.
    json = df.to_json(date_unit='ns')
    result = read_json(json, convert_dates=False)
    expected = df.copy()
    # without conversion the date column stays as i8 nanoseconds
    expected['date'] = expected['date'].values.view('i8')
    expected['foo'] = expected['foo'].astype('int64')
    assert_frame_equal(result, expected)
    # series
    ts = Series(Timestamp('20130101'), index=self.ts.index)
    json = ts.to_json()
    result = read_json(json, typ='series')
    assert_series_equal(result, ts)
def test_convert_dates_infer(self):
    """Columns whose names look date-like are inferred as dates (GH10747)."""
    # GH10747
    from pandas.io.json import dumps
    infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
                   'modified', 'timestamp', 'timestamps']
    for infer_word in infer_words:
        data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
        expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
                             columns=['id', infer_word])
        result = read_json(dumps(data))[['id', infer_word]]
        assert_frame_equal(result, expected)
def test_date_format_frame(self):
    """ISO date format round-trips frames at every date_unit; an invalid
    unit raises ValueError."""
    df = self.tsframe.copy()

    def test_w_date(date, date_unit=None):
        # helper: write with iso format (optionally at a unit) and re-read
        df['date'] = Timestamp(date)
        df.iloc[1, df.columns.get_loc('date')] = pd.NaT
        df.iloc[5, df.columns.get_loc('date')] = pd.NaT
        if date_unit:
            json = df.to_json(date_format='iso', date_unit=date_unit)
        else:
            json = df.to_json(date_format='iso')
        result = read_json(json)
        assert_frame_equal(result, df)

    test_w_date('20130101 20:43:42.123')
    test_w_date('20130101 20:43:42', date_unit='s')
    test_w_date('20130101 20:43:42.123', date_unit='ms')
    test_w_date('20130101 20:43:42.123456', date_unit='us')
    test_w_date('20130101 20:43:42.123456789', date_unit='ns')
    pytest.raises(ValueError, df.to_json, date_format='iso',
                  date_unit='foo')
def test_date_format_series(self):
    """ISO date format round-trips series at every date_unit; an invalid
    unit raises ValueError."""

    def test_w_date(date, date_unit=None):
        # helper: write with iso format (optionally at a unit) and re-read
        ts = Series(Timestamp(date), index=self.ts.index)
        ts.iloc[1] = pd.NaT
        ts.iloc[5] = pd.NaT
        if date_unit:
            json = ts.to_json(date_format='iso', date_unit=date_unit)
        else:
            json = ts.to_json(date_format='iso')
        result = read_json(json, typ='series')
        assert_series_equal(result, ts)

    test_w_date('20130101 20:43:42.123')
    test_w_date('20130101 20:43:42', date_unit='s')
    test_w_date('20130101 20:43:42.123', date_unit='ms')
    test_w_date('20130101 20:43:42.123456', date_unit='us')
    test_w_date('20130101 20:43:42.123456789', date_unit='ns')
    ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
    pytest.raises(ValueError, ts.to_json, date_format='iso',
                  date_unit='foo')
def test_date_unit(self):
    """Epoch dates round-trip at each unit, whether the unit is forced or
    auto-detected, including out-of-range years and NaT."""
    df = self.tsframe.copy()
    df['date'] = Timestamp('20130101 20:43:42')
    dl = df.columns.get_loc('date')
    df.iloc[1, dl] = Timestamp('19710101 20:43:42')
    df.iloc[2, dl] = Timestamp('21460101 20:43:42')
    df.iloc[4, dl] = pd.NaT
    for unit in ('s', 'ms', 'us', 'ns'):
        json = df.to_json(date_format='epoch', date_unit=unit)
        # force date unit
        result = read_json(json, date_unit=unit)
        assert_frame_equal(result, df)
        # detect date unit
        result = read_json(json, date_unit=None)
        assert_frame_equal(result, df)
def test_weird_nested_json(self):
    """Deeply nested JSON parses without crashing (used to core dump)."""
    # this used to core dump the parser
    s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
    read_json(s)
def test_doc_example(self):
    """Round-trip the io docs example frame through to_json/read_json.

    Bug fix: the original assertion was ``assert_frame_equal(result,
    result)``, which compares the parsed frame with itself and can never
    fail.  Compare the parsed frame against the source frame instead.
    """
    dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
    dfj2['date'] = Timestamp('20130101')
    dfj2['ints'] = lrange(5)
    dfj2['bools'] = True
    dfj2.index = pd.date_range('20130101', periods=5)
    json = dfj2.to_json()
    result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
    assert_frame_equal(result, dfj2)
def test_misc_example(self):
    """Unordered record keys: numpy=True mis-parses (index becomes the
    keys), while the default path parses correctly."""
    # parsing unordered input fails
    result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
    expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
    error_msg = """DataFrame\\.index are different

DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]:  Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
    with tm.assert_raises_regex(AssertionError, error_msg):
        assert_frame_equal(result, expected, check_index_type=False)
    result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
    expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
    assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
    """A large real-world CSV round-trips through JSON (GH 3867).

    Network test: downloads the Lahman Teams.csv fixture.
    """
    # GH 3867
    csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
    df = pd.read_csv(csv)
    s = df.to_json()
    result = pd.read_json(s)
    assert_frame_equal(result.reindex(
        index=df.index, columns=df.columns), df)
@network
def test_url(self):
    """read_json accepts a URL and converts date-like columns.

    Network test: hits the GitHub issues API.
    """
    url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5'  # noqa
    result = read_json(url, convert_dates=True)
    for c in ['created_at', 'closed_at', 'updated_at']:
        assert result[c].dtype == 'datetime64[ns]'
def test_timedelta(self):
    """Timedeltas serialise as millisecond integers and can be recovered
    with an explicit to_timedelta conversion after reading."""
    converter = lambda x: pd.to_timedelta(x, unit='ms')
    s = Series([timedelta(23), timedelta(seconds=5)])
    assert s.dtype == 'timedelta64[ns]'
    result = pd.read_json(s.to_json(), typ='series').apply(converter)
    assert_series_equal(result, s)
    s = Series([timedelta(23), timedelta(seconds=5)],
               index=pd.Index([0, 1]))
    assert s.dtype == 'timedelta64[ns]'
    result = pd.read_json(s.to_json(), typ='series').apply(converter)
    assert_series_equal(result, s)
    frame = DataFrame([timedelta(23), timedelta(seconds=5)])
    assert frame[0].dtype == 'timedelta64[ns]'
    assert_frame_equal(frame, pd.read_json(frame.to_json())
                       .apply(converter))
    frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
                       'b': [1, 2],
                       'c': pd.date_range(start='20130101', periods=2)})
    result = pd.read_json(frame.to_json(date_unit='ns'))
    result['a'] = pd.to_timedelta(result.a, unit='ns')
    result['c'] = pd.to_datetime(result.c)
    assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
    """A column mixing timedelta and datetime objects serialises each to
    its nanosecond integer value."""
    frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
                      dtype=object)
    expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
                                pd.Timestamp(frame.a[1]).value]})
    result = pd.read_json(frame.to_json(date_unit='ns'),
                          dtype={'a': 'int64'})
    assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
    """default_handler is applied to objects the serialiser cannot encode."""
    value = object()
    frame = DataFrame({'a': [7, value]})
    expected = DataFrame({'a': [7, str(value)]})
    result = pd.read_json(frame.to_json(default_handler=str))
    assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
    """default_handler may return nested structures that are themselves
    serialised recursively."""
    from pandas.io.json import dumps

    def default(obj):
        # encode complex numbers as a tagged list; stringify everything else
        if isinstance(obj, complex):
            return [('mathjs', 'Complex'),
                    ('re', obj.real),
                    ('im', obj.imag)]
        return str(obj)

    df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
                             'b': [float('nan'), None, 'N/A']},
                            columns=['a', 'b'])]
    expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
                '["re",4.0],["im",-5.0]],"N\\/A"]]]')
    assert dumps(df_list, default_handler=default,
                 orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
    """Complex columns (unsupported numpy dtype) serialise through the
    default handler instead of raising (GH12554)."""
    # GH12554 to_json raises 'Unhandled numpy dtype 15'
    df = DataFrame({'a': [1, 2.3, complex(4, -5)],
                    'b': [float('nan'), None, complex(1.2, 0)]},
                   columns=['a', 'b'])
    expected = ('[["(1+0j)","(nan+0j)"],'
                '["(2.3+0j)","(nan+0j)"],'
                '["(4-5j)","(1.2+0j)"]]')
    assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
    """Exceptions raised inside default_handler propagate to the caller."""

    def my_handler_raises(obj):
        raise TypeError("raisin")

    pytest.raises(TypeError,
                  DataFrame({'a': [1, 2, object()]}).to_json,
                  default_handler=my_handler_raises)
    pytest.raises(TypeError,
                  DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
                  default_handler=my_handler_raises)
def test_categorical(self):
    """Categorical columns serialise identically to their object-dtype
    equivalents (GH4377: used to segfault on non-ndarray blocks)."""
    # GH4377 df.to_json segfaults with non-ndarray blocks
    df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
    df["B"] = df["A"]
    expected = df.to_json()
    df["B"] = df["A"].astype('category')
    assert expected == df.to_json()
    s = df["A"]
    sc = df["B"]
    assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
    """tz-aware datetimes serialise the same as their UTC-naive
    equivalents (GH4377: used to segfault on non-ndarray blocks)."""
    # GH4377 df.to_json segfaults with non-ndarray blocks
    tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
    tz_naive = tz_range.tz_convert('utc').tz_localize(None)
    df = DataFrame({
        'A': tz_range,
        'B': pd.date_range('20130101', periods=3)})
    df_naive = df.copy()
    df_naive['A'] = tz_naive
    expected = df_naive.to_json()
    assert expected == df.to_json()
    stz = Series(tz_range)
    s_naive = Series(tz_naive)
    assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
    """Sparse frames/series serialise the same as their dense
    counterparts (GH4377: used to segfault on non-ndarray blocks)."""
    # GH4377 df.to_json segfaults with non-ndarray blocks
    df = pd.DataFrame(np.random.randn(10, 4))
    df.loc[:8] = np.nan
    sdf = df.to_sparse()
    expected = df.to_json()
    assert expected == sdf.to_json()
    s = pd.Series(np.random.randn(10))
    s.loc[:8] = np.nan
    ss = s.to_sparse()
    expected = s.to_json()
    assert expected == ss.to_json()
def test_tz_is_utc(self):
    """ISO serialisation always renders timestamps in UTC, regardless of
    the original timezone representation."""
    from pandas.io.json import dumps
    exp = '"2013-01-10T05:00:00.000Z"'
    ts = Timestamp('2013-01-10 05:00:00Z')
    assert dumps(ts, iso_dates=True) == exp
    dt = ts.to_pydatetime()
    assert dumps(dt, iso_dates=True) == exp
    ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
    assert dumps(ts, iso_dates=True) == exp
    dt = ts.to_pydatetime()
    assert dumps(dt, iso_dates=True) == exp
    ts = Timestamp('2013-01-10 00:00:00-0500')
    assert dumps(ts, iso_dates=True) == exp
    dt = ts.to_pydatetime()
    assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
    """Date ranges, DatetimeIndexes and frames all render in UTC under
    ISO serialisation, for every timezone spelling."""
    from pandas.io.json import dumps
    exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
    dfexp = ('{"DT":{'
             '"0":"2013-01-01T05:00:00.000Z",'
             '"1":"2013-01-02T05:00:00.000Z"}}')
    tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
    assert dumps(tz_range, iso_dates=True) == exp
    dti = pd.DatetimeIndex(tz_range)
    assert dumps(dti, iso_dates=True) == exp
    df = DataFrame({'DT': dti})
    assert dumps(df, iso_dates=True) == dfexp
    tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
                             tz='US/Eastern')
    assert dumps(tz_range, iso_dates=True) == exp
    dti = pd.DatetimeIndex(tz_range)
    assert dumps(dti, iso_dates=True) == exp
    df = DataFrame({'DT': dti})
    assert dumps(df, iso_dates=True) == dfexp
    tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
    assert dumps(tz_range, iso_dates=True) == exp
    dti = pd.DatetimeIndex(tz_range)
    assert dumps(dti, iso_dates=True) == exp
    df = DataFrame({'DT': dti})
    assert dumps(df, iso_dates=True) == dfexp
def test_latin_encoding(self):
    """Latin-1 round-trip test; currently skipped because to_json has no
    encoding argument (GH 13774)."""
    if compat.PY2:
        tm.assert_raises_regex(
            TypeError, r'\[unicode\] is not implemented as a table column')
        return
    # GH 13774
    pytest.skip("encoding not implemented in .to_json(), "
                "xref #13774")
    values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
              [b'E\xc9, 17', b'a', b'b', b'c'],
              [b'EE, 17', b'', b'a', b'b', b'c'],
              [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
              [b'', b'a', b'b', b'c'],
              [b'\xf8\xfc', b'a', b'b', b'c'],
              [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
              [np.nan, b'', b'b', b'c'],
              [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]

    def _try_decode(x, encoding='latin-1'):
        try:
            return x.decode(encoding)
        except AttributeError:
            return x

    # not sure how to remove latin-1 from code in python 2 and 3
    values = [[_try_decode(x) for x in y] for y in values]
    examples = []
    for dtype in ['category', object]:
        for val in values:
            examples.append(Series(val, dtype=dtype))

    def roundtrip(s, encoding='latin-1'):
        with ensure_clean('test.json') as path:
            s.to_json(path, encoding=encoding)
            retr = read_json(path, encoding=encoding)
            assert_series_equal(s, retr, check_categorical=False)

    for s in examples:
        roundtrip(s)
def test_data_frame_size_after_to_json(self):
    """to_json must not grow the frame's memory footprint (GH15344)."""
    # GH15344
    df = DataFrame({'a': [str(1)]})
    size_before = df.memory_usage(index=True, deep=True).sum()
    df.to_json()
    size_after = df.memory_usage(index=True, deep=True).sum()
    assert size_before == size_after
| bsd-3-clause |
joshfuchs/ZZCeti_pipeline | spectools.py | 1 | 35547 | """
This program contains various definitions and commonly done things
for spectra for the ZZ CETI pipeline.
Written primarily by JT Fuchs
Based on pySALT
"""
#import pyfits as fits
import astropy.io.fits as fits
import ReduceSpec_tools as rt
import numpy as np
import scipy
from scipy.interpolate import InterpolatedUnivariateSpline as interpo
from scipy.interpolate import UnivariateSpline
import os
class spectrum(object):
    """Container for the planes extracted from a 1-D spectrum file.

    Attributes: opfarr (optimally extracted flux), farr (raw extracted
    flux), sky (background), sigma (uncertainty), warr (wavelengths).
    """

    def __init__(self, opfarr, farr, sky, sigma, warr):
        # Store every plane verbatim; this class is a plain record.
        self.opfarr, self.farr, self.sky, self.sigma, self.warr = (
            opfarr, farr, sky, sigma, warr)
# ===========================================================================
class standard(object):
    """Container for a flux-standard table.

    Attributes: warr (wavelengths), magarr (magnitudes), wbin (bin widths).
    """

    def __init__(self, warr, magarr, wbin):
        # Plain record: keep the three columns as given.
        self.warr, self.magarr, self.wbin = warr, magarr, wbin
# ===========================================================================
def readspectrum(specfile):
    """Read an extracted 1-D spectrum FITS file.

    Parameters
    ----------
    specfile : str
        Path to a FITS file whose primary HDU data cube holds, in order,
        the optimally extracted spectrum, the raw extracted spectrum, the
        sky spectrum, and the sigma spectrum.

    Returns
    -------
    (spectrum, airmass, exptime, specdeltawav) where `spectrum` is a
    spectrum object, and specdeltawav holds the per-pixel wavelength step.
    """
    spec = fits.open(specfile)
    opfarr = spec[0].data[0,0,:]
    farr = spec[0].data[1,0,:]
    sky = spec[0].data[2,0,:]
    sigma = spec[0].data[3,0,:]
    #Read in header info
    airmass = spec[0].header['airmass']
    exptime = spec[0].header['exptime']
    '''
    #Set up wavelengths using linear dispersion
    specwav0 = spec[0].header['crval1'] #Grab the leftmost wavelength coordinate
    specdeltawav = spec[0].header['cd1_1'] #Grab the delta coordinate
    warr = np.zeros(len(farr)) #Fill an array with appropriate wavelength values
    warr[0] = specwav0
    ival = np.arange(1,len(farr))
    for i in ival:
        warr[i] = warr[i-1] + specdeltawav
    '''
    #Set up wavelengths using grating equation
    alpha = float(spec[0].header['GRT_TARG'])
    theta = float(spec[0].header['CAM_TARG'])
    fr = float(spec[0].header['LINDEN'])
    fd = float(spec[0].header['CAMFUD'])
    fl = float(spec[0].header['FOCLEN'])
    zPnt = float(spec[0].header['ZPOINT'])
    # CCDSEC looks like "[x1:x2,y1:y2]"; recover the 0-based trim offset
    trim_sec= spec[0].header["CCDSEC"]
    trim_offset= float( trim_sec[1:len(trim_sec)-1].split(':')[0] )-1
    # Binning keyword name differs between instrument setups, hence the fallback
    try:
        bining= float( spec[0].header["PARAM18"] )
    except:
        bining= float( spec[0].header["PG3_2"] )
    nx= np.size(opfarr)#spec_data[0]
    # Physical pixel positions on the unbinned, untrimmed detector
    Pixels= bining*(np.arange(0,nx,1)+trim_offset)
    WDwave = DispCalc(Pixels, alpha, theta, fr, fd, fl, zPnt)
    warr = np.asarray(WDwave)
    # Per-pixel dispersion; first element copies the first forward difference
    specdeltawav = np.zeros(len(warr))
    specdeltawav[0] = warr[1] - warr[0]
    for i in range(1,len(warr)):
        specdeltawav[i] = warr[i] - warr[i-1]
    result = spectrum(opfarr,farr,sky,sigma,warr)
    return result,airmass,exptime,specdeltawav
# ===========================================================================
def DispCalc(Pixels, alpha, theta, fr, fd, fl, zPnt):
    """Apply the grating equation to convert pixel numbers to wavelengths.

    Fixes over the original: the parameter list in the comments was
    garbled ("aheta", "wavelenght", fl undocumented); the pixel-independent
    trigonometry is now hoisted out of the loop.

    Parameters
    ----------
    Pixels : sequence
        Pixel numbers to convert.
    alpha : float
        Grating angle in degrees.
    theta : float
        Camera angle in degrees.
    fr : float
        Fringe density of the grating.
    fd : float
        Camera-angle correction factor.
    fl : float
        Focal length (same pixel-pitch units as the 15-micron factor).
    zPnt : float
        Zero-point pixel.

    Returns
    -------
    list of float
        One wavelength (Angstroms) per input pixel.
    """
    deg2rad = np.pi / 180.
    alpha_rad = alpha * deg2rad
    # Constant angular offset added to every pixel's arctan term
    offset = fd * theta * deg2rad - alpha_rad
    sin_alpha = np.sin(alpha_rad)
    Wavelengths = []  # Vector to store calculated wavelengths
    for pix in Pixels:
        beta = np.arctan((pix - zPnt) * 15. / fl) + offset
        Wavelengths.append((10**6.) * (np.sin(beta) + sin_alpha) / fr)
    return Wavelengths
# ===========================================================================
def readheader(specfile):
    """Return the primary header of a FITS file with the non-uniform
    Goodman keywords removed.

    The deleted PARAM* cards are the ones that contain degree symbols and
    therefore vary (and break parsing) between Goodman setups.
    """
    spec = fits.open(specfile)
    #Delete the parts of the header that are not uniform in Goodman. These are primarily the parts that contain degree symbols.
    header = spec[0].header
    del header['param0']
    del header['param61']
    del header['param62']
    del header['param63']
    return header
# ===========================================================================
def readstandard(stdfile):
    """Read a flux-standard text file and return it as a standard object.

    The file must have three columns: wavelength, magnitude, bin width.
    """
    columns = np.genfromtxt(stdfile, unpack=True)
    return standard(columns[0], columns[1], columns[2])
# ===========================================================================
def applywavelengths(wavefile, applyfile, newname):
    """Copy the grating-equation wavelength solution from one FITS file
    into the header of another and write the result to ``newname``.

    Fixes over the original: the Python-2-only ``print`` statements are
    replaced with the parenthesised call form (valid on both Python 2 and
    3), and the list-comprehension existence check is replaced with a
    direct membership test.  ``raw_input`` is kept because the module is
    Python 2.

    Parameters
    ----------
    wavefile : str
        FITS file containing the fitted LINDEN/CAMFUD/FOCLEN/ZPOINT cards.
    applyfile : str
        FITS file to which the solution should be applied.
    newname : str
        Output filename; the user is prompted if it already exists.
    """
    # Read in file with wavelength solution and get header info
    wave = fits.open(wavefile)
    n_fr = float(wave[0].header['LINDEN'])
    n_fd = float(wave[0].header['CAMFUD'])
    fl = float(wave[0].header['FOCLEN'])
    zPnt = float(wave[0].header['ZPOINT'])
    # Read in file to apply wavelength solution and update header
    spec_data = fits.getdata(applyfile)
    spec_header = fits.getheader(applyfile)
    rt.Fix_Header(spec_header)
    spec_header.append(('LINDEN', n_fr, 'Line Desity for Grating Eq.'),
                       useblanks=True, bottom=True)
    spec_header.append(('CAMFUD', n_fd, 'Camera Angle Correction Factor for Grat. Eq.'),
                       useblanks=True, bottom=True)
    spec_header.append(('FOCLEN', fl, 'Focal Length for Grat Eq.'),
                       useblanks=True, bottom=True)
    spec_header.append(('ZPOINT', zPnt, 'Zero Point Pixel for Grat Eq.'),
                       useblanks=True, bottom=True)
    NewspecHdu = fits.PrimaryHDU(data=spec_data, header=spec_header)
    # See if new file already exists; if so let the user choose what to do
    exists = newname in os.listdir('.')
    clob = False
    if exists:
        print('File %s already exists.' % newname)
        nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
        if nextstep == 'overwrite':
            clob = True
            exists = False
        elif nextstep == 'new':
            newname = raw_input('New file name: ')
            exists = False
        else:
            exists = False
    NewspecHdu.writeto(newname, output_verify='warn', clobber=clob)
# ===========================================================================
def magtoflux(marr, fzero):
    """Convert magnitudes to flux.

    marr -- magnitude value(s)
    fzero -- flux zero point for the conversion
    """
    # Standard Pogson relation: flux = fzero * 10^(-0.4 m)
    scale = 10. ** (-0.4 * marr)
    return fzero * scale
# ===========================================================================
def fnutofwave(warr, farr):
    """Convert flux density from ergs/s/cm2/Hz to ergs/s/cm2/A."""
    c = 2.99792458e18  # speed of light in Angstroms/s
    # f_lambda = f_nu * c / lambda^2
    return c * farr / warr ** 2.
# ===========================================================================
def sum_std(std_warr, wbin, spec_warr, spec_farr):
    """Sum the observed spectrum into the wavelength bins of the
    flux-standard file.

    Bug fix: the original advanced its index with ``n += 1.`` (a float),
    so ``wbin[n]`` raised a TypeError/IndexError after the first bin on
    plain lists and modern numpy.  The rewrite pairs each bin centre with
    its width via zip and selects in-bin pixels with a boolean mask
    instead of the double np.where / intersect1d dance.

    Parameters
    ----------
    std_warr : sequence
        Bin-centre wavelengths from the standard file.
    wbin : sequence
        Full bin widths, one per entry of std_warr.
    spec_warr, spec_farr : array-like
        Observed wavelength and flux arrays.

    Returns
    -------
    numpy.ndarray
        Summed flux per standard bin (inclusive bounds, as before).
    """
    spec_warr = np.asarray(spec_warr)
    spec_farr = np.asarray(spec_farr)
    totals = []
    for lam, width in zip(std_warr, wbin):
        low = lam - width / 2.
        high = lam + width / 2.
        # Inclusive on both edges, matching the original >= / <= tests
        in_bin = (spec_warr >= low) & (spec_warr <= high)
        totals.append(np.sum(spec_farr[in_bin]))
    return np.asarray(totals)
# ===========================================================================
def sensfunc(obs_counts, std_flux, exptime, bins, airmass):
    """Calculate the sensitivity curve for a spectrum, in magnitudes.

    Per point: C = 2.5 * log10(counts / (exptime * bin * std_flux)).

    NOTE(review): the original comment advertised an additional
    ``airmass * extinction`` term, but no extinction correction was ever
    applied; ``airmass`` is accepted (and ignored) purely for interface
    compatibility.

    Idiom fix: the manual grow-a-list loop is replaced with a list
    comprehension over enumerate; the math is unchanged.

    Parameters
    ----------
    obs_counts : sequence
        Observed counts per bin.
    std_flux : sequence
        Standard-star flux per bin.
    exptime : float
        Exposure time.
    bins : sequence
        Bin widths, aligned with obs_counts/std_flux.
    airmass : float
        Unused (see note above).

    Returns
    -------
    numpy.ndarray
        Sensitivity value per bin.
    """
    sens = [2.5 * np.log10(counts / (exptime * bins[n] * std_flux[n]))
            for n, counts in enumerate(obs_counts)]
    return np.asarray(sens)
# ===========================================================================
def cal_spec(counts, sens, exptime, disp):
    """Flux-calibrate an observed spectrum with a sensitivity function.

    counts -- observed counts; sens -- sensitivity (magnitudes);
    exptime -- exposure time; disp -- dispersion per pixel.
    """
    # Undo the magnitude-scale sensitivity and normalise by time and dispersion.
    scale = exptime * disp * 10. ** (sens / 2.5)
    return counts / scale
# ===========================================================================
def extinction_correction(lams, flux, airmass):
    '''
    Extinction correction based on Strizinger et. al. 2005 values for CTIO

    lams    -- wavelengths (Angstroms) of the spectrum
    flux    -- flux values of the spectrum
    airmass -- airmass at which the spectrum was observed

    Returns the flux scaled up by 10**(0.4 * A(lambda) * airmass),
    i.e. corrected to zero airmass.
    '''
    # Function inputs are wavelengths and flux values for the spectrum as well
    # as the airmass the spectrum was measured at
    # wavelength-dependent extinction coefficients from CTIO
    # Strizinger et. al. 2005
    ctio_lams = [3050.0, 3084.6500000000001, 3119.3099999999999, 3153.96, 3188.6100000000001, 3223.27, 3257.9200000000001, 3292.5700000000002, 3327.23, 3361.8800000000001, 3396.54, 3431.1900000000001, 3465.8400000000001, 3500.5, 3535.1500000000001, 3569.8000000000002, 3604.46, 3639.1100000000001, 3673.7600000000002, 3708.4200000000001, 3743.0700000000002, 3777.7199999999998, 3812.3800000000001, 3847.0300000000002, 3881.6900000000001, 3916.3400000000001, 3950.9899999999998, 3985.6500000000001, 4020.3000000000002, 4054.9499999999998, 4089.6100000000001, 4124.2600000000002, 4158.9099999999999, 4193.5699999999997, 4228.2200000000003, 4262.8699999999999, 4297.5299999999997, 4332.1800000000003, 4366.8299999999999, 4401.4899999999998, 4436.1400000000003, 4470.79, 4505.4499999999998, 4540.1000000000004, 4574.7600000000002, 4609.4099999999999, 4644.0600000000004, 4678.7200000000003, 4713.3699999999999, 4748.0200000000004, 4782.6800000000003, 4817.3299999999999, 4851.9799999999996, 4886.6400000000003, 4921.29, 4955.9399999999996, 4990.6000000000004, 5025.25, 5059.9099999999999, 5094.5600000000004, 5129.21, 5163.8699999999999, 5198.5200000000004, 5233.1700000000001, 5267.8299999999999, 5302.4799999999996, 5337.1300000000001, 5371.79, 5406.4399999999996, 5441.0900000000001, 5475.75, 5510.3999999999996, 5545.0500000000002, 5579.71, 5614.3599999999997, 5649.0200000000004, 5683.6700000000001, 5718.3199999999997, 5752.9799999999996, 5787.6300000000001, 5822.2799999999997, 5856.9399999999996, 5891.5900000000001, 5926.2399999999998, 5960.8999999999996, 5995.5500000000002, 6030.1999999999998, 6064.8599999999997, 6099.5100000000002, 6134.1700000000001, 6168.8199999999997, 6203.4700000000003, 6238.1300000000001, 6272.7799999999997, 6307.4300000000003, 6342.0900000000001, 6376.7399999999998, 6411.3900000000003, 6446.0500000000002, 6480.6999999999998, 6482.8500000000004, 6535.3800000000001, 6587.9099999999999, 6640.4399999999996, 6692.96, 6745.4899999999998, 6798.0200000000004, 
6850.5500000000002, 6903.0699999999997, 6955.6000000000004, 7008.1300000000001, 7060.6499999999996, 7113.1800000000003, 7165.71, 7218.2399999999998, 7270.7600000000002, 7323.29, 7375.8199999999997, 7428.3500000000004, 7480.8699999999999, 7533.3999999999996, 7585.9300000000003, 7638.4499999999998, 7690.9799999999996, 7743.5100000000002, 7796.04, 7848.5600000000004, 7901.0900000000001, 7953.6199999999999, 8006.1499999999996, 8058.6700000000001, 8111.1999999999998, 8163.7299999999996, 8216.25, 8268.7800000000007, 8321.3099999999995, 8373.8400000000001, 8426.3600000000006, 8478.8899999999994, 8531.4200000000001, 8583.9500000000007, 8636.4699999999993, 8689.0, 8741.5300000000007, 8794.0499999999993, 8846.5799999999999, 8899.1100000000006, 8951.6399999999994, 9004.1599999999999, 9056.6900000000005, 9109.2199999999993, 9161.75, 9214.2700000000004, 9266.7999999999993, 9319.3299999999999, 9371.8500000000004, 9424.3799999999992, 9476.9099999999999, 9529.4400000000005, 9581.9599999999991, 9634.4899999999998, 9687.0200000000004, 9739.5499999999993, 9792.0699999999997, 9844.6000000000004, 9897.1299999999992, 9949.6499999999996, 10002.200000000001, 10054.700000000001, 10107.200000000001, 10159.799999999999, 10212.299999999999, 10264.799999999999, 10317.299999999999, 10369.9, 10422.4, 10474.9, 10527.5, 10580.0, 10632.5, 10685.0, 10737.6, 10790.1, 10842.6, 10895.1, 10947.700000000001, 11000.200000000001]
    ctio_ext = [1.395, 1.2830000000000001, 1.181, 1.0880000000000001, 1.004, 0.92900000000000005, 0.86099999999999999, 0.80099999999999993, 0.748, 0.69999999999999996, 0.65900000000000003, 0.623, 0.59099999999999997, 0.56399999999999995, 0.54000000000000004, 0.52000000000000002, 0.502, 0.48700000000000004, 0.47299999999999998, 0.46000000000000002, 0.44799999999999995, 0.436, 0.42499999999999999, 0.41399999999999998, 0.40200000000000002, 0.39100000000000001, 0.38100000000000001, 0.37, 0.35999999999999999, 0.34899999999999998, 0.33899999999999997, 0.33000000000000002, 0.32100000000000001, 0.313, 0.30399999999999999, 0.29600000000000004, 0.28899999999999998, 0.28100000000000003, 0.27399999999999997, 0.26700000000000002, 0.26000000000000001, 0.254, 0.247, 0.24100000000000002, 0.23600000000000002, 0.23000000000000001, 0.22500000000000001, 0.22, 0.215, 0.20999999999999999, 0.20600000000000002, 0.20199999999999999, 0.19800000000000001, 0.19399999999999998, 0.19, 0.187, 0.184, 0.18100000000000002, 0.17800000000000002, 0.17600000000000002, 0.17300000000000001, 0.17100000000000001, 0.16899999999999998, 0.16699999999999998, 0.16600000000000001, 0.16399999999999998, 0.16300000000000001, 0.16200000000000001, 0.16, 0.159, 0.158, 0.158, 0.157, 0.156, 0.155, 0.155, 0.154, 0.153, 0.153, 0.152, 0.151, 0.151, 0.14999999999999999, 0.14899999999999999, 0.14899999999999999, 0.14800000000000002, 0.14699999999999999, 0.14599999999999999, 0.14400000000000002, 0.14300000000000002, 0.14199999999999999, 0.14000000000000001, 0.13800000000000001, 0.13600000000000001, 0.13400000000000001, 0.13200000000000001, 0.129, 0.126, 0.12300000000000001, 0.12, 0.12, 0.115, 0.111, 0.107, 0.10300000000000001, 0.099000000000000005, 0.096000000000000002, 0.091999999999999998, 0.088000000000000009, 0.085000000000000006, 0.08199999999999999, 0.078, 0.074999999999999997, 0.072000000000000008, 0.069000000000000006, 0.066000000000000003, 0.064000000000000001, 0.060999999999999999, 0.057999999999999996, 
0.055999999999999994, 0.052999999999999999, 0.050999999999999997, 0.049000000000000002, 0.047, 0.044999999999999998, 0.042999999999999997, 0.040999999999999995, 0.039, 0.037000000000000005, 0.035000000000000003, 0.034000000000000002, 0.032000000000000001, 0.029999999999999999, 0.028999999999999998, 0.027999999999999997, 0.026000000000000002, 0.025000000000000001, 0.024, 0.023, 0.022000000000000002, 0.02, 0.019, 0.019, 0.018000000000000002, 0.017000000000000001, 0.016, 0.014999999999999999, 0.014999999999999999, 0.013999999999999999, 0.013000000000000001, 0.013000000000000001, 0.012, 0.011000000000000001, 0.011000000000000001, 0.011000000000000001, 0.01, 0.01, 0.0090000000000000011, 0.0090000000000000011, 0.0090000000000000011, 0.0080000000000000002, 0.0080000000000000002, 0.0080000000000000002, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0030000000000000001, 0.0030000000000000001, 0.0030000000000000001]
    # Smoothing cubic spline through the tabulated coefficients.
    # NOTE(review): UnivariateSpline is presumably imported from
    # scipy.interpolate at the top of the file -- not visible here.
    smooth_param = 0.001
    spline_fit = UnivariateSpline(ctio_lams, ctio_ext, s=smooth_param, k=3)
    a_lambda = spline_fit(lams)
    # Multiplying by 10**(0.4 * A * X) brightens the spectrum back to
    # zero airmass (inverse of the extinction dimming).
    corrected_flux = flux*(10.0**(.4*a_lambda*(airmass)))
    # xx/yy feed only the commented-out diagnostic plots below.
    xx = np.linspace(np.min(ctio_lams), np.max(ctio_lams), 1000)
    yy = spline_fit(xx)
    '''
    plt.figure()
    plt.scatter(ctio_lams, ctio_ext, label=smooth_param)
    plt.axvline(np.min(lams), color='g')
    plt.axvline(np.max(lams), color='g')
    plt.plot(xx, yy)
    plt.xlabel('Wavelength')
    plt.ylabel('Extinction Coefficient')
    plt.title('Gemini Extinction Coefficient Fit')
    '''
    '''
    plt.figure()
    plt.plot(lams,flux)
    plt.plot(lams,corrected_flux)
    plt.show()
    '''
    return corrected_flux
# ===========================================================================
def resamplespec(w1, w0, spec0, oversamp=100):
    """
    Resample a spectrum onto a new wavelength grid, conserving flux
    density.  Written by Ian Crossfield:
    www.lpl.arizona.edu/~ianc/python/index.html

    :INPUTS:
      w1 : sequence
        new wavelength grid (center wavelength of each pixel)
      w0 : sequence
        old wavelength grid (center wavelength of each pixel)
      spec0 : sequence
        old spectrum (e.g., flux density or photon counts)
      oversamp : int
        factor by which the input spectrum is oversampled before
        rebinning; worst fractional precision is roughly 1./oversamp.

    :NOTE: call signature mirrors :func:`numpy.interp`.
    """
    # Oversample the input on a fine fractional-pixel grid.
    npix = len(w0)
    pix0 = np.arange(npix, dtype=float)
    pix_fine = np.arange((npix - 1.) * oversamp + 1., dtype=float) / oversamp
    w0_fine = np.interp(pix_fine, pix0, w0)
    spec0_fine = np.interp(w0_fine, w0, spec0) / oversamp
    # Bin edges for down-binning: midpoints between the new centers,
    # padded by the largest new-pixel spacing at either end.
    dw_max = np.diff(w1).max()
    edges = np.concatenate(([w1[0] - dw_max],
                            .5 * (w1[1:] + w1[:-1]),
                            [w1[-1] + dw_max]))
    # Sum the oversampled flux into the new bins via errxy().
    _, spec1, _, _ = errxy(w0_fine, spec0_fine, edges,
                           xmode=None, ymode='sum', xerr=None, yerr=None)
    return spec1
# ===========================================================================
def errxy(x,y,xbins, xmode='mean', ymode='mean', xerr='minmax', yerr='sdom', clean=None, binfactor=None, verbose=False,returnstats=False, timing=False):
    """Bin down datasets in X and Y for errorbar plotting
    Written by Ian Crossfield: www.lpl.arizona.edu/~ianc/python/index.html
    :INPUTS:
       x -- (array) independent variable data
       y -- (array) dependent variable data
       xbins -- (array) edges of bins, in x-space.  Only x-data
                between two bin edges will be used.  Thus if M bin
                edges are entered, (M-1) datapoints will be returned.
                If xbins==None, then no binning is done.
    :OPTIONAL INPUT:
       xmode/ymode -- (str) method to aggregate x/y data into datapoints:
          'mean' -- use numpy.mean
          'median' -- use numpy.median
          'sum' -- use numpy.sum
          None -- don't compute; return the empty list []
       xerr/yerr -- (str) method to aggregate x/y data into errorbars
          'std' -- sample standard deviation (numpy.std)
          'sdom' -- standard deviation on the mean; i.e., std/sqrt(N)
          'minmax' -- use full range of data in the bin
          None -- don't compute; return the empty list []
       binfactor -- (int) If not None, average over this many
          consecutive values instead of binning explicitly by
          time-based bins.  Can also be a sequence, telling the
          number of values over which to average.  E.g.,
          binfactor=[10,10,20] will bin over the first 10 points,
          the second 10 points, and the next 20 points.
       clean -- (dict) keyword options to clean y-data ONLY, via
                analysis.removeoutliers, with an additional "nsigma"
                keyword.  See removeoutliers for more information.
                E.g.:  clean=dict(nsigma=5,remove='both',niter=1)
    :OUTPUTS: a tuple of four arrays to be passed to matplotlib.pyplot.errorbar:
       xx -- locations of the aggregated x-datapoint in each bin
       yy -- locations of the aggregated y-datapoint in each bin
       xerr -- x-errorbars
       yerr -- y-errorbars
    :EXAMPLE:
      ::
          x = hstack((arange(10), arange(20)+40))
          y = randn(len(x))
          xbins = [-1,15,70]
          xx,yy,xerr,yerr = errxy(x,y,xbins)
          plot(x,y, '.b')
          errorbar(xx,yy,xerr=xerr,yerr=yerr, fmt='or')
    :NOTES:
       To just bin down uncleaned data (i.e., no 'error' terms
          returned), set clean, xerr, yerr to None.  However, when
          computing all values (xerr and yerr not None) it is faster
          to set clean to some rediculous value, i.e.,
          clean=dict(niter=0, nsigma=9e99).  This probably means more
          optimization could be done.
       Be sure you call the errorbar function using the keywords xerr
          and yerr, since otherwise the default order of inputs to the
          function is (x,y,yerr,xerr).
       Data 'x' are determined to be in a bin with sides (L, R) when
          satisfying the condition (x>L) and (x<=R)
    :SEE ALSO:  matplotlib.pyplot.errorbar, :func:`analysis.removeoutliers`
    :REQUIREMENTS:  :doc:`numpy`, :doc:`analysis`
    """
    # 2009-09-29 20:07 IJC: Created w/mean-median and std-sdom-minmax.
    # 2009-12-14 16:01 IJC: xbins can be 'None' for no binning.
    # 2009-12-15 10:09 IJC: Added "binfactor" option.
    # 2009-12-22 09:56 IJC: "binfactor" can now be a sequence.
    # 2009-12-29 01:16 IJC: Fixed a bug with binfactor sequences.
    # 2010-04-29 09:59 IJC: Added 'returnstats' feature
    # 2010-10-19 16:25 IJC: Added 'sum' option for x-data
    # 2011-03-22 12:57 IJC: Added 'none' option for data and errors
    # 2012-03-20 16:33 IJMC: Fixed bug; xmode=='none' now works.
    # 2012-03-27 14:00 IJMC: Now using np.digitize -- speed boost.
    #                        Rewrote code to optimize (somewhat),
    #                        cleaned up 'import' statements.
    # 2012-04-08 15:57 IJMC: New speed boost from adopting
    #                        numpy.histogram-like implementation:
    #                        numpy.searchsorted, etc.
    # NOTE(review): Python 2 code throughout (print statements,
    # dict.has_key, exec-in-locals); kept byte-identical.
    #from analysis import removeoutliers
    import numpy as np
    if timing:
        import time
        tic = time.time()
    def sdom(data):
        """Return standard deviation of the mean."""
        return np.std(data)/np.sqrt(data.size)
    def getcenter(data, cmode):
        """Get data center based on mode.  Helper function."""
        if cmode is None:
            ret = 0
        elif cmode=='mean':
            ret = np.mean(data)
        elif cmode=='median':
            ret = np.median(data)
        elif cmode=='sum':
            ret = np.sum(data)
        return ret
    def geterr(data, emode, cmode):
        """Get errorbar.  Helper function."""
        if emode is None:
            ret = []
        elif emode=='std':
            ret = np.std(data)
        elif emode=='sdom':
            ret = sdom(data)
        elif emode=='minmax':
            if len(data)==0:
                ret = [np.nan, np.nan]
            else:
                center = getcenter(data,cmode)
                ret = [center-min(data), max(data)-center]
        return ret
    def cleandata(data, clean, returnstats=False):
        """Clean data using removeoutliers.  Helper function."""
        init_count = np.array(data).size
        if clean==None: # Don't clean at all!
            #clean = dict(nsigma=1000, niter=0)
            if returnstats:
                ret = data, (init_count, init_count)
            else:
                ret = data
        else:  # Clean the data somehow ('clean' must be a dict)
            if not clean.has_key('nsigma'):
                clean.update(dict(nsigma=99999))
            data = removeoutliers(data, **clean)
            if returnstats:
                ret = data, (init_count, np.array(data).size)
            else:
                ret = data
        return ret
    if timing:
        print "%1.3f sec since starting function; helpers defined" % (time.time() - tic)
    ####### Begin main function ##########
    # Sort x (and y to match) so per-bin slices are contiguous.
    sorted_index = np.argsort(x)
    x = np.array(x, copy=False)[sorted_index]
    y = np.array(y, copy=False)[sorted_index]
    #x = np.array(x,copy=True).ravel()
    #y = np.array(y,copy=True).ravel()
    xbins = np.array(xbins,copy=True).ravel()
    # No binning requested: return the raw data with NaN errorbars.
    if xbins[0]==None and binfactor==None:
        if returnstats ==False:
            ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan
        else:
            ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan, (x.size, x.size)
        return ret
    if binfactor==None:  # used passed-in 'xbins'
        xbins = np.sort(xbins)
    elif hasattr(binfactor,'__iter__'):  # use variable-sized bins
        binfactor = np.array(binfactor).copy()
        sortedx = np.sort(x)
        betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
        xbins = []
        counter = 0
        for ii in range(len(binfactor)):
            thisbin = betweens[counter]
            xbins.append(thisbin)
            counter += binfactor[ii]
        xbins.append(x.max() + 1)
    else:  # bin down by the same factor throughout
        binfactor = int(binfactor)
        sortedx = np.sort(x)
        betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
        xbins = betweens[::binfactor]
    if timing:
        print "%1.3f sec since starting function; bins defined" % (time.time() - tic)
    nbins = len(xbins)-1
    arraynan = np.array([np.nan])
    exx = []
    eyy = []
    xx = np.zeros(nbins)
    yy = np.zeros(nbins)
    yy2 = np.zeros(nbins)
    init_count, final_count = y.size, 0
    if timing:
        setuptime = 0
        xdatatime = 0
        ydatatime = 0
        statstime = 0
    #import pylab as py
    #xxx = np.sort(x)
    if timing: tic1 = time.time()
    #inds = np.digitize(x, xbins)
    # For each bin, [start, stop) slice indices into the sorted x.
    inds2 = [[x.searchsorted(xbins[ii], side='left'), \
                  x.searchsorted(xbins[ii+1], side='left')] for ii in range(nbins)]
    if timing: setuptime += (time.time() - tic1)
    #pdb.set_trace()
    #bin_means = [data[digitized == i].mean() for i in range(1, len(bins))]
    # Flags for which of the four outputs actually need computing.
    dox = xmode is not None
    doy = ymode is not None
    doex = xerr is not None
    doey = yerr is not None
    if clean is None:
        if timing: tic3 = time.time()
        # Resolve 'mean'/'median'/'sum' to the numpy function once
        # (Python 2 exec-in-locals idiom).
        if dox: exec ('xfunc = np.%s' % xmode) in locals()
        if doy: exec ('yfunc = np.%s' % ymode) in locals()
        for ii in range(nbins):
            #index = inds==(ii+1)
            if dox:
                #xx[ii] = xfunc(x[index])
                xx[ii] = xfunc(x[inds2[ii][0]:inds2[ii][1]])
            if doy:
                #yy[ii] = yfunc(y[index])
                yy[ii] = yfunc(y[inds2[ii][0]:inds2[ii][1]])
            if doex:
                #exx.append(geterr(x[index], xerr, xmode))
                exx.append(geterr(x[inds2[ii][0]:inds2[ii][1]], xerr, xmode))
            if doey:
                #eyy.append(geterr(y[index], yerr, ymode))
                eyy.append(geterr(y[inds2[ii][0]:inds2[ii][1]], yerr, ymode))
        if timing: statstime += (time.time() - tic3)
        #pdb.set_trace()
    else:
        # Slow path: y-data must be cleaned per bin via removeoutliers.
        for ii in range(nbins):
            if timing: tic1 = time.time()
            #index = inds==(ii+1)
            if timing: setuptime += (time.time() - tic1)
            if timing: tic2 = time.time()
            xdata = x[inds2[ii][0]:inds2[ii][1]]
            if timing: xdatatime += (time.time() - tic2)
            if timing: tic25 = time.time()
            if ymode is None and yerr is None:  # We're free to ignore the y-data:
                ydata = arraynan
            else: # We have to compute something with the y-data:
                if clean is not None:
                    ydata, retstats = cleandata(y[inds2[ii][0]:inds2[ii][1]], clean, returnstats=True)
                    if returnstats:
                        final_count += retstats[1]
                else:  # We don't have to clean the data
                    ydata = y[inds2[ii][0]:inds2[ii][1]]
                    if returnstats:
                        final_count += ydata.size
            if timing: ydatatime += (time.time() - tic25)
            if timing: tic3 = time.time()
            xx[ii] = getcenter(xdata,xmode)
            if timing: tic4 = time.time()
            yy[ii] = getcenter(ydata,ymode)
            if timing: tic5 = time.time()
            exx.append(geterr( xdata,xerr,xmode))
            if timing: tic6 = time.time()
            eyy.append(geterr( ydata,yerr,ymode))
            if timing: tic7 = time.time()
            if timing: statstime += (time.time() - tic3)
            #exx[ii] = geterr( xdata,xerr,xmode)
            #eyy[ii] = geterr( ydata,yerr,ymode)
    if timing:
        print "%1.3f sec for setting up bins & indices..." % setuptime
        print "%1.3f sec for getting x data clean and ready." % xdatatime
        print "%1.3f sec for getting y data clean and ready." % ydatatime
        #print "%1.3f sec for computing x-data statistics." % (tic4-tic3)
        #print "%1.3f sec for computing y-data statistics." % (tic5-tic4)
        #print "%1.3f sec for computing x-error statistics." % (tic6-tic5)
        #print "%1.3f sec for computing y-error statistics." % (tic7-tic6)
        print "%1.3f sec for computing statistics........." % statstime
    if timing:
        print "%1.3f sec since starting function; uncertainties defined" % (time.time() - tic)
    #xx = array(xx)
    #yy = array(yy)
    exx = np.array(exx).transpose()  # b/c 2D if minmax option used
    eyy = np.array(eyy).transpose()  # b/c 2D if minmax option used
    #pdb.set_trace()
    if returnstats:
        ret= xx,yy,exx,eyy,(init_count, final_count)
    else:
        ret = xx,yy,exx,eyy
    #print 'tools: returnstats, len(ret)>>', returnstats, len(ret)
    if timing:
        print "%1.3f sec since starting function; returning" % (time.time() - tic)
    return ret
# ===========================================================================
def removeoutliers(data, nsigma, remove='both', center='mean', niter=500, retind=False, verbose=False):
    """Strip outliers from a dataset, iterating until converged.
    Written by Ian Crossfield: www.lpl.arizona.edu/~ianc/python/index.html
    :INPUT:
      data -- 1D numpy array.  data from which to remove outliers.
      nsigma -- positive number.  limit defining outliers: number of
                standard deviations from center of data.
    :OPTIONAL INPUTS:
      remove -- ('min'|'max'|'both') respectively removes outliers
                below, above, or on both sides of the limits set by
                nsigma.
      center -- ('mean'|'median'|value) -- set central value, or
                method to compute it.
      niter -- number of iterations before exit; a large value
               can occasionally result in empty arrays returned.
      retind -- (bool) whether to return index of good values as
                second part of a 2-tuple.
    :EXAMPLE:
      ::
          data = randn(1000)
          d2 = removeoutliers(data, 1.5, niter=1)
    """
    # 2009-09-04 13:24 IJC: Created
    # 2009-09-24 17:34 IJC: Added 'retind' feature.  Tricky, but nice!
    # 2009-10-01 10:40 IJC: Added check for stdev==0
    # 2009-12-08 15:42 IJC: Added check for isfinite
    # Fixes vs. the original: the Python-2-only '<>' operator is now
    # '!=', the undefined pylab-style find() is replaced with
    # ndarray.nonzero(), the builtin-shadowing local 'iter' is renamed,
    # and the verbose prints use py2/py3-compatible call syntax.
    from numpy import median, ones, isfinite
    def getcen(data, method):
        "Get central value of a 1D array (helper function)"
        if method.__class__==str:
            if method=='median':
                cen = median(data)
            else:
                cen = data.mean()
        else:
            # 'center' may also be a literal numeric value.
            cen = method
        return cen
    def getgoodindex(data, nsigma, center, stdev, remove):
        "Boolean mask of the values within the nsigma limits."
        if stdev==0:
            # Degenerate spread: treat every point as at the center.
            distance = data*0.0
        else:
            distance = (data-center)/stdev
        if remove=='min':
            goodind = distance>-nsigma
        elif remove=='max':
            goodind = distance<nsigma
        else:
            goodind = abs(distance)<=nsigma
        return goodind
    data = data.ravel().copy()
    ndat0 = len(data)
    ndat = len(data)
    n_iter = 0
    # Non-finite values are excluded from the start.
    goodind = ones(data.shape, bool)
    goodind *= isfinite(data)
    # Iterate until the kept-count stops changing, the iteration cap
    # is hit, or nothing survives.
    while ((ndat0 != ndat) or (n_iter == 0)) and (n_iter < niter) and (ndat > 0):
        ndat0 = len(data[goodind])
        cen = getcen(data[goodind], center)
        stdev = data[goodind].std()
        thisgoodind = getgoodindex(data[goodind], nsigma, cen, stdev, remove)
        # Narrow the surviving positions by this pass's verdicts.
        goodind[goodind.nonzero()[0]] = thisgoodind
        if verbose:
            print("cen>> %s" % cen)
            print("std>> %s" % stdev)
        ndat = len(data[goodind])
        n_iter += 1
    if verbose:
        print("%s %s" % (ndat0, ndat))
    if retind:
        ret = data[goodind], goodind
    else:
        ret = data[goodind]
    return ret
# ===========================================================================
def resample(old_dispersion, new_dispersion):
    """
    Written by Andy Casey: https://github.com/andycasey
    This program is found under sick/specutils.py
    The output is a compressed sparse column matrix that tells the ratio between the old and new binning. To get the fluxes of the new binning: newflux = np.dot(oldflux,output.toarray())
    Resample a spectrum to a new dispersion map while conserving total flux.
    :param old_dispersion:
        The original dispersion array.
    :type old_dispersion:
        :class:`numpy.array`
    :param new_dispersion:
        The new dispersion array to resample onto.
    :type new_dispersion:
        :class:`numpy.array`
    """
    # Triplets for the sparse matrix: data[m] is the fractional overlap
    # of old pixel old_px_indices[m] with new pixel new_px_indices[m].
    data = []
    old_px_indices = []
    new_px_indices = []
    for i, new_wl_i in enumerate(new_dispersion):
        # These indices should span just over the new wavelength pixel.
        indices = np.unique(np.clip(
            old_dispersion.searchsorted(new_dispersion[i:i + 2], side="left") \
                + [-1, +1], 0, old_dispersion.size - 1))
        N = np.ptp(indices)
        if N == 0:
            # 'Fake' pixel: no old-pixel span; marked with NaN weight.
            data.append(np.nan)
            new_px_indices.append(i)
            old_px_indices.extend(indices)
            continue
        # Sanity checks.
        assert (old_dispersion[indices[0]] <= new_wl_i \
            or indices[0] == 0)
        assert (new_wl_i <= old_dispersion[indices[1]] \
            or indices[1] == old_dispersion.size - 1)
        fractions = np.ones(N)
        # Edges are handled as fractions between rebinned pixels.
        # '_' clamps the right-hand new-pixel index at the array edge.
        _ = np.clip(i + 1, 0, new_dispersion.size - 1)
        lhs = old_dispersion[indices[0]:indices[0] + 2]
        rhs = old_dispersion[indices[-1] - 1:indices[-1] + 1]
        fractions[0]  = (lhs[1] - new_dispersion[i])/np.ptp(lhs)
        fractions[-1] = (new_dispersion[_] - rhs[0])/np.ptp(rhs)
        # Being binned to a single pixel.  Prevent overflow from fringe cases.
        fractions = np.clip(fractions, 0, 1)
        fractions /= fractions.sum()
        data.extend(fractions)
        new_px_indices.extend([i] * N) # Mark the new pixel indices affected.
        old_px_indices.extend(np.arange(*indices)) # And the old pixel indices.
    # NOTE(review): relies on a module-level 'import scipy.sparse'
    # (or equivalent) not visible in this chunk.
    return scipy.sparse.csc_matrix((data, (old_px_indices, new_px_indices)),
        shape=(old_dispersion.size, new_dispersion.size))
| mit |
evanl/perc | perc_objects.py | 1 | 34291 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import random
import sys
import bisect
import read_eclipse as re
import eclipse_cells as ec
from time import time, clock
import csv
class Perc(object):
def __init__(self, nx, ny, nz, r_max = 10, volume_fraction = 1.0):
if nx >=3 and ny >=3:
self.nx = nx
self.ny = ny
else:
fail('expected nx >=3 and ny >=3, \n got \
nx = %d, ny= %d' %\
nx, ny)
if nz == 1:
self.nz = nz
elif nz >=3:
self.nz = nz
else:
fail('expected nz = 1 for 2d simulation or\
nz >=3 for 3d simulation \n \
got nz = %d' % nz)
self.r_max = r_max
self.x = {}
self.y = {}
self.z = {}
self.thres_z = {}
self.corners = {}
self.perm = {}
self.poro = {}
self.volume = {}
self.grid_values = {}
self.fill_steps = []
self.fill_times = []
self.candidates = [] #keep sorted
self.sbres = 0.2
self.scmax = 1 - self.sbres
self.vfrac = volume_fraction
def add_injection(self, mass_inflow, end_time_days, \
density):
self.inj = self.Injection(mass_inflow, end_time_days, \
density)
def add_volume(self, choice):
vol = self.vfrac * self.poro[choice] *\
self.scmax * self.volume[choice]
return vol
    class Injection(object):
        """Bookkeeping for an injection schedule: tracks elapsed time,
        injected mass/volume, and which yearly rate is active.

        mass_inflow   -- sequence of yearly mass rates (Mt/yr, presumably
                         -- TODO confirm units against callers)
        end_time_days -- injection duration in days
        density       -- injected-fluid density (kg/m^3)
        """
        def __init__(self, mass_inflow, end_time_days, density):
            # Simulation clock starts at the beginning of 1998.
            self.t_elapsed = 1998 * 365.25 # days
            self.q_index = 0
            self.t_end = end_time_days + self.t_elapsed
            self.rho = density
            self.massflow = mass_inflow
            #conversion factor
            # 31.71 converts the yearly mass rate to kg/s
            # (1e9 kg / 3.156e7 s per year ~ 31.7 -- consistent with
            # Mt/yr input; TODO confirm).
            self.mr_kg_sec = []
            self.q = []
            self.end_days = []
            for i in range(len(self.massflow)):
                self.mr_kg_sec.append(self.massflow[i] * 31.71)
                self.q.append(self.massflow[i] * 31.71 / self.rho) # -> m^3/s
                self.end_days.append((1999 + i) * 365.25)
            self.injected_mass = 0.
            self.injected_volume = 0.
            msum = 0.
            for i in range(len(self.massflow)):
                msum += self.massflow[i]
            massflow_avg = msum / float(len(self.massflow))
            # Total mass/volume that the average rate would deliver
            # over the full injection period.
            self.max_mass = end_time_days * massflow_avg* 31.71 * 24 * 3600.
            self.max_volume = self.max_mass / self.rho
        def add_time(self, t_add):
            """Advance the clock by t_add days."""
            self.t_elapsed += t_add
            return 0
        def add_mass(self, vol_add):
            """Record a filled volume (m^3) and advance the clock by the
            time the current rate needs to inject it."""
            self.injected_volume += vol_add
            mass_add = self.rho * vol_add
            self.injected_mass += mass_add
            time_taken = vol_add / (self.q[self.q_index] * 24 * 3600)
            # add time in days ^^^
            # NOTE(review): time_taken_1 is computed but never used.
            time_taken_1 = mass_add / (self.mr_kg_sec[self.q_index] * 24 * 3600)
            self.add_time(time_taken)
            # NOTE(review): the '<=' bound still permits an increment
            # when q_index is already at the last rate, which would
            # index past self.q on the next call -- confirm intended.
            if self.get_elapsed_time() > self.end_days[self.q_index] and \
                    self.q_index <= len(self.end_days) -1:
                self.increment_q_index()
            return 0
        def increment_q_index(self):
            """Switch to the next yearly injection rate."""
            self.q_index += 1
            return 0
        def get_elapsed_time(self):
            return self.t_elapsed
        def get_max_mass(self):
            return self.max_mass
        def get_injected_mass(self):
            return self.injected_mass
        def get_injected_volume(self):
            return self.injected_volume
        def get_density(self):
            return self.rho
        def get_mass_inflow(self):
            return self.massflow
        def get_end_time(self):
            return self.t_end
        def end_reached(self):
            """True once the clock has passed the injection end time."""
            if self.t_elapsed > self.t_end:
                return True
            else:
                return False
class Corner(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def get_x(self):
return self.x
def get_y(self):
return self.y
def get_z(self):
return self.z
def get_grid_value(self, key):
""" returns the value for a given cell
creates this value if it doesn't exist.
"""
#if key not in self.grid_values:
#self.set_grid_value(key)
return self.grid_values[key]
def set_grid_value(self, key, val = 'random'):
""" sets grid value, sets value as not filled
"""
if val == 'random':
self.grid_values[key] = random.randint(1, self.r_max)
else:
self.grid_values[key] = val
def mark_filled(self, key, time = '1'):
""" marks grid values as filled if they are
within the bounds of the grid size
"""
assert 0 <= key[0] < self.nx, \
'i coordinate out of range(%d vs %d)' % \
(key[0], self.nx)
assert 0 <= key[1] < self.ny, \
'j coordinate out of range(%d vs %d)' % \
(key[1], self.ny)
if self.nz == 1:
assert key[2] == 0, 'k must equal zero'
else:
assert 0 <= key[2] < self.nz,\
'k coordinate out of range (%d vs %d)' % \
(key[2], self.nz)
self.fill_steps.append(key)
self.fill_times.append(time)
def find_new_candidates(self):
""" grabs neighbor cell values, inserts them into sorted list
"""
key = self.fill_steps[-1]
new_can = self.get_neighbor_candidates(key)
for can in new_can:
bisect.insort_left(self.candidates, (self.grid_values[can], can))
return self.candidates
def get_neighbor_candidates(self, key):
""" checks neighbor candidates, ignores if already in list
"""
neighbors = self.get_neighbor_keys(key)
candidates = []
for key in neighbors:
if key not in self.fill_steps:
candidates.append(key)
return candidates
def get_neighbor_keys(self, key):
""" Checks six sides of neighbors for 3d case
Checks four sides of neighbors for the 2d case
"""
keys = []
keys.append((key[0] - 1, key[1], key[2]))
keys.append((key[0] + 1, key[1], key[2]))
keys.append((key[0], key[1] - 1, key[2]))
keys.append((key[0], key[1] + 1, key[2]))
if self.nz != 1:
keys.append((key[0], key[1], key[2] - 1))
keys.append((key[0], key[1], key[2] + 1))
return keys
    def end_criterion(self, end_type = 'boundary'):
        """Decide whether the simulation should stop.

        'boundary'  -- stop when self.choice sits on any grid face.
        'injection' -- stop when the injection clock passes its end
                       time, or (fallback) when a boundary is hit.
        """
        if end_type == 'boundary':
            if self.choice[0] in (0, self.nx-1) \
                    or self.choice[1] in (0, self.ny-1):
                print "x-y Boundary hit "
                return True
            elif self.nz != 1 and self.choice[2] in (0, self.nz-1):
                # Top or bottom face (3-D runs only).
                return True
            else:
                return False
        elif end_type == 'injection':
            end_time = self.inj.get_end_time()
            elapsed = self.inj.get_elapsed_time()
            if elapsed > end_time:
                print "end criterion"
                print "time elapsed: " + str(elapsed)
                print " end time: " + str(end_time)
                return True
            elif self.end_criterion(end_type = 'boundary'):
                return True
            else:
                return False
    def run_simulation(self, injection = False):
        """ fills grid. If no initial value is specified, picks
            i, j, k == nx/2, ny/2, nz/2

        Repeatedly: extend the candidate frontier, invade the
        lowest-threshold cell, and (if injection is True) convert the
        filled pore volume into injected mass/time until the end
        criterion fires.
        """
        if injection == True:
            end_type = 'injection'
        else:
            end_type = 'boundary'
        print "PERCOLATING........"
        step_count = 0
        while True:
            step_count +=1
            self.candidates = self.find_new_candidates()
            assert self.candidates, 'no fillable cells found'
            self.choice = self.percolate()
            # Without injection, 'time' is just the step counter.
            time = step_count
            if injection == True:
                volume_filled = self.add_volume(self.choice)
                self.inj.add_mass(volume_filled)
                time = self.inj.get_elapsed_time()
            self.mark_filled(self.choice, time = time)
            if self.end_criterion(end_type = end_type):
                # NOTE(review): the summary below reads self.inj even
                # when injection is False -- raises AttributeError if
                # add_injection() was never called; confirm intended.
                print "Number of Cells filled: " + \
                        str(len(self.fill_steps))
                print "mass in system : " + \
                        str(self.inj.get_injected_mass())
                print "maximum mass : " + \
                        str(self.inj.get_max_mass())
                break
        return 0
def percolate(self):
choice = self.candidates[0][1]
#print choice, '{:.3e}'.format(self.grid_values[choice]),\
#" runner up -> ", self.candidates[1][1], \
#'{:.3e}'.format(self.grid_values[self.candidates[1][1]]),\
#" end", '{:.3e}'.format(self.grid_values[self.candidates[-1][1]])
self.candidates.remove(self.candidates[0])
return choice
    def make_uniform_grid(self):
        """Populate every cell with a random threshold and unit-spaced
        coordinates, then seed the invasion at the grid center."""
        print "making uniform grid"
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    key = (i, j, k)
                    # Default 'random' value in [1, r_max].
                    self.set_grid_value(key)
                    self.x[key] = i
                    self.y[key] = j
                    self.z[key] = k
        if len(self.fill_steps) == 0:
            # Python 2 integer division gives the center cell indices.
            init_key = (self.nx/2, self.ny/2, self.nz/2)
            self.mark_filled(init_key)
        print "grid with: (nx, ny, nz) = ", \
                (self.nx, self.ny, self.nz), " made!"
        return 0
    def make_sleipner_grid(self, vol_dict, xyz_dict, poroperm_dict):
        """ sets :
            self.x
            self.y
            self.z
            self.poro
            self.perm

        Builds the hard-coded 65x119x43 Sleipner grid from the given
        per-cell dictionaries (volume, coordinates, porosity/perm),
        assigns each cell a percolation threshold, and seeds the
        injection cell (32, 77, 34).
        """
        t0 = clock()
        print "making Sleipner grid"
        self.nx = 65
        self.ny = 119
        self.nz = 43
        # Elevation datum: the injection cell's z.
        base_elev = xyz_dict[(32, 77, 34)][2]
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    key = (i, j, k)
                    vol = vol_dict[key]
                    x = xyz_dict[key][0]
                    y = xyz_dict[key][1]
                    z = xyz_dict[key][2]
                    poro = poroperm_dict[key][0]
                    perm = poroperm_dict[key][1]
                    self.x[key] = x
                    self.y[key] = y
                    self.z[key] = z
                    # Height above the injection point (buoyancy term).
                    self.thres_z[key] = base_elev - z
                    # NOTE(review): boost is currently 0.0, so this
                    # branch is a no-op hook for shifting the south
                    # half of the grid -- confirm intended.
                    if j <=49:
                        boost = 0.0
                        self.z[key] += boost
                        self.thres_z[key] += boost
                    self.volume[key] = vol
                    self.poro[key] = poro
                    self.perm[key] = perm
                    #if perm > 0.1:
                        #self.perm[key] = 2000.
                    # Threshold plus a constant 1e5 offset.
                    val = self.perc_threshold(key) + 1. * pow(10,5.)
                    #if i == 32 and j == 77:
                        #print '{:d}, {:.3e}, {:.3e}'.format(k, perm, val)
                    self.set_grid_value(key, val = val)
        if len(self.fill_steps) == 0:
            init_key = (32, 77, 34)
            self.mark_filled(init_key, time = 1998. * 365.25 )
        print "grid with: (nx, ny, nz) = ", \
                (self.nx, self.ny, self.nz), " made in "
        print clock() - t0, " seconds"
        return 0
def contour_topo(self):
    """Filled contour plot of the elevation of layer k == 2 over the
    full (x, y) footprint.

    NOTE(review): the savefig call is commented out, so the figure is
    created but never saved or shown by this method.
    """
    fig = plt.figure(figsize = (9., 12.))
    ax = fig.add_subplot(111)
    x = []
    y = []
    elev = []
    for i in range(65):  # hard-coded nx; presumably equals self.nx
        b2 = []
        b3 = []
        blank = []
        #if i >= 35 and i < 50:
        for j in range(119):  # hard-coded ny; presumably equals self.ny
            #if j >= 45 and j < 75:
            b2.append(self.x[(i, j, 2)])
            b3.append(self.y[(i, j, 2)])
            blank.append(self.z[(i, j, 2)])
        elev.append(blank)
        x.append(b2)
        y.append(b3)
    xp = np.asarray(x)
    yp = np.asarray(y)
    elp = np.asarray(elev)
    N = 10  # number of contour levels / colorbar ticks
    c = ax.contourf(xp, yp, elp, N)
    cb = plt.colorbar(c, format='%.2f')
    cb.set_ticks(np.linspace(np.amin(elp), np.amax(elp), N))
    cb.set_label('elev [m]')
    ax.set_xlabel('x [m]')
    ax.set_ylabel('y [m]')
    #plt.savefig('topo.png')
def perc_threshold(self, key):
# TODO
# ioannidis et al 1996.
c = 0.186
c = pow(10.,8.)
sigma = 0.045
#c = 1.
#sigma = 1.
pcd = c * sigma * \
pow(self.perm[key] / self.poro[key], -1/2.)
rho_b = 1019.
g = 9.81
delta_rho = rho_b - self.inj.get_density()
pgrav = delta_rho * g * (self.thres_z[key])
if key[0] == 32 and key[1] == 77:
a = 1
print "k, pcd, pgrav", "perm"
print '{:d}, {:3e}, {:3e}, {:3e}'.format(key[2], pcd, \
pgrav, pcd + pgrav)
return pcd + pgrav
def get_time_index_gravseg(self):
    """Return ``(n, time_days)`` for the first fill step that enters
    layer k == 2 coming from a different layer (gravity segregation
    reaching the top layer).

    Fix: the original collected every transition and then read
    ``time_indices[0]``, raising IndexError when no transition exists.
    We now break at the first transition and return ``(0, 0.)`` when
    none is found (the initial values of the original locals).
    """
    for i in range(1, len(self.fill_steps)):
        curr = self.fill_steps[i]
        prev = self.fill_steps[i - 1]
        # transition *into* the k == 2 layer
        if curr[2] == 2 and prev[2] != 2:
            return i, self.fill_times[i]
    return 0, 0.
def get_plan_year_indices(self, years):
    """Map each entry of *years* (years since t=0) to the index of the
    fill step that brackets that time, i.e. the first n with
    fill_times[n-1] < year_in_days < fill_times[n].

    Returns a list parallel to *years*; 0 when no step brackets the
    requested year.

    Fix: the original reset ``yr_ind = 0`` on *every* inner iteration,
    so a found index survived only if the match occurred on the very
    last step; we now initialize once per year and break on the first
    match.
    """
    yr_indices = []
    for year in years:
        yr_days = year * 365.25
        yr_ind = 0
        for n in range(1, len(self.fill_times)):
            if self.fill_times[n] > yr_days and \
                    self.fill_times[n-1] < yr_days:
                yr_ind = n
                break
        yr_indices.append(yr_ind)
    return yr_indices
def plot_sleipner_thick_contact(self, years, gwc = False, sim_title = ''):
    """Plan-view contour panels, one per year, of plume thickness
    (default) or gas-water contact elevation (gwc=True).

    Saves '<sim_title>_thickness.pdf' or '<sim_title>_contact.pdf'.
    """
    if gwc == True:
        tc_str = 'contact'
    else:
        tc_str = 'thickness'
    yr_indices = self.get_plan_year_indices(years)
    size = 14
    font = {'size' : size}
    matplotlib.rc('font', **font)
    fig = plt.figure(figsize=(10.0, 2.5), dpi = 960)
    # matplotlib 3-digit subplot code: 1 row x len(years) columns
    middle = len(years) * 10
    pos = 100 + middle
    for n in range(len(yr_indices)):
        pos +=1
        ax = fig.add_subplot(pos)
        xf = []
        yf = []
        kf = []
        for i in range(self.nx):
            tempx = []
            tempy = []
            tempk = []
            for j in range(self.ny):
                x = self.x[(i, j, 0)]
                y = self.y[(i, j, 0)]
                tn = yr_indices[n]
                thick, contact = self.get_thick_contact(i, j, tn)
                tempx.append(x)
                tempy.append(y)
                if gwc == True:
                    tempk.append(contact)
                else:
                    tempk.append(thick)
            xf.append(tempx)
            yf.append(tempy)
            kf.append(tempk)
        xp = np.asarray(xf)
        yp = np.asarray(yf)
        kp = np.asarray(kf)
        N = 10  # contour levels
        contour_label = False  # NOTE(review): unused local
        ax_label = False       # NOTE(review): unused local
        c = ax.contourf(xp, yp, kp, N)
        plt.tick_params(which='major', length=3, color = 'w')
        if n == len(years) - 1:
            # single shared colorbar attached right of the last panel
            fig.subplots_adjust(right=0.84)
            cb_axes = fig.add_axes([0.85, 0.15, 0.05, 0.7])
            plt.tick_params(which='major', length=3, color = 'k')
            cb = fig.colorbar(c, cax = cb_axes, format = '%.2f')
            cb.set_ticks(np.linspace(np.amin(kp), np.amax(kp), N))
            cb.set_label(tc_str + ': [m]')
        if n != 0:
            # only the leftmost panel keeps its y tick labels
            ax.set_yticklabels([])
        ax.set_xticklabels([])
        ax.set_title(str(years[n]))
        ax.axis([0, 3000, 0, 6000])
        ax.xaxis.set_ticks(np.arange(0,3500,1000))
    plt.savefig(sim_title + '_' + tc_str + '.pdf', fmt = 'pdf')
    plt.clf()
    return 0
def plot_sleipner_plume(self, years, sim_title = 'sleipner_perc'):
    """Plan-view scatter of all cells filled up to each year's time
    index, colored by layer index k. Saves '<sim_title>_plume.pdf'.
    """
    yr_indices = self.get_plan_year_indices(years)
    size = 14
    font = {'size' : size}
    matplotlib.rc('font', **font)
    fig = plt.figure(figsize=(16.0, 5), dpi=960)
    # matplotlib 3-digit subplot code: 1 row x len(years) columns
    middle = len(years) * 10
    pos = 100 + middle
    for i in range(len(yr_indices)):
        pos +=1
        ax = fig.add_subplot(pos)
        xf = []
        yf = []
        kf = []
        for n in range(yr_indices[i]):
            key = self.fill_steps[n]
            #if key[0] >= 35 and key[0] < 50:
                #if key[1] >= 45 and key[1] < 75:
            xf.append(self.x[key])
            yf.append(self.y[key])
            kf.append(key[2])
            if 50 == key[1]:
                # NOTE(review): key1 is never used -- dead code
                key1 = (key[0], key[1]-1, key[2])
        xp = np.asarray(xf)
        yp = np.asarray(yf)
        sc = ax.scatter(xp, yp, s=20, c=kf)
        ax.set_title(str(years[i]))
        ax.axis([0, 3000, 0, 6000])
        ax.xaxis.set_ticks(np.arange(0, 3000, 1500))
        if i != 0:
            # only the leftmost panel keeps its y tick labels
            ax.set_yticklabels([])
        #elif i == 5:
            #cb_axes = self.fig.add_axes([0.85, 0.15, 0.05, 0.7])
            #fig.colorbar(sc, cax = cb_axes)
    plt.savefig(sim_title + '_plume.pdf', fmt = 'pdf')
    plt.clf()
    return 0
def plot_sleipner_cross_section(self, years, sec_index = 32):
    """Cross-section (y-z) panels at column i == sec_index, one per
    year: filled cells plus the top (red) and bottom (green) permeable
    boundaries. Saves 'sleipner_cross_section.png'.
    """
    yr_indices = self.get_plan_year_indices(years)
    size = 14
    font = {'size' : size}
    matplotlib.rc('font', **font)
    fig = plt.figure(figsize=(16.0, 5))
    pos = 150  # subplot code: 1 row x 5 columns (assumes <= 5 years)
    top = []
    bot = []
    ybound = []
    # permeable-interval boundaries along the section
    for key in self.x.keys():
        if key[0] == sec_index:
            t, b = self.get_boundary_zs(key[0], key[1])
            top.append(t)
            bot.append(b)
            ybound.append(self.y[key])
    for i in range(len(yr_indices)):
        pos +=1
        ax = fig.add_subplot(pos)
        yf = []
        zf = []
        # cells filled up to this year's time index, restricted to the section
        for n in range(yr_indices[i]):
            key = self.fill_steps[n]
            if key[0] == sec_index:
                yf.append(self.y[key])
                zf.append(self.z[key])
        yp = np.asarray(yf)
        zp = np.asarray(zf)
        tp = np.asarray(top)
        bp = np.asarray(bot)
        yb = np.asarray(ybound)
        tl = ax.scatter(yb, tp, s=5, c='r')
        bl = ax.scatter(yb, bp, s=5, c='g')
        sc = ax.scatter(yp, zp, s=10)
        ax.set_title(str(years[i]))
        ax.axis([0, 6000, -815, -800])
        ax.xaxis.set_ticks(np.arange(0, 6000, 1500))
        if i != 0:
            ax.set_yticklabels([])
    plt.savefig('sleipner_cross_section.png')
    plt.clf()
    return 0
def contour_top_boundary(self):
    """Contour the top-boundary elevation (z of layer k == 2) over the
    whole (x, y) footprint and save it as 'top_boundary.png'.

    Fix: the original initialized ``top = []`` twice; the duplicate is
    removed. Behavior is unchanged.
    """
    x = []
    y = []
    top = []
    for i in range(self.nx):
        xinter = []
        yinter = []
        tinter = []
        for j in range(self.ny):
            key = (i, j, 2)
            xinter.append(self.x[key])
            yinter.append(self.y[key])
            tinter.append(self.z[key])
        x.append(xinter)
        y.append(yinter)
        top.append(tinter)
    xp = np.asarray(x)
    yp = np.asarray(y)
    tp = np.asarray(top)
    fig = plt.figure(figsize=(8.5,11))
    ax = fig.add_subplot(111)
    N = 50  # number of contour levels
    cs_val = ax.contour(xp, yp, tp, N)
    cb_val = plt.colorbar(cs_val, shrink = 0.8,\
            extend='both')
    cb_val.set_label('Top Boundary [z]')
    fig.savefig('top_boundary.png', bbox_inches='tight', format='png')
    return 0
def make_scatter_plan_t0_tn(self, t0, tn):
    """Return (x, y) coordinate arrays for the cells filled between fill
    step t0 (inclusive) and tn (exclusive).

    Fix: the original allocated arrays of length tn - t0 but indexed
    them with the absolute step index n, raising IndexError whenever
    t0 > 0. The output index is now n - t0 (identical results for
    t0 == 0).
    """
    count = tn - t0
    x = np.zeros(count)
    y = np.zeros(count)
    for n in range(t0, tn):
        key = self.fill_steps[n]
        x[n - t0] = self.x[key]
        y[n - t0] = self.y[key]
    return x, y
def plot_2d(self, uniform_grid = True):
    """2-D scatter of the fill history: each filled cell plotted at its
    (x, y), shaded by fill time. Saves 'sleipner_2d.png'.
    """
    print "PLOTTING..........."
    f = plt.figure()
    ax = f.add_subplot(111)
    # make base grid of cells
    if uniform_grid == True:
        pts = []  # NOTE(review): unused local
        xs = []
        ys = []
        # only the four domain corner cells are drawn as the "grid"
        for i in [0, self.nx-1]:
            for j in [0, self.ny-1]:
                key = (i, j, 0)
                xs.append(self.x[key])
                ys.append(self.y[key])
        xp = np.asarray(xs)
        yp = np.asarray(ys)
        ax.scatter(xp, yp, s=30, c='w', marker='s')
    # go through steps and figure out times
    xf = []
    yf = []
    tf = []
    tmin = self.fill_times[0]
    tmax = self.fill_times[-1]
    for i in range(0, len(self.fill_steps)):
        key = self.fill_steps[i]
        xf.append(self.x[key])
        yf.append(self.y[key])
        tf.append(self.fill_times[i])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    xfp = np.asarray(xf)
    yfp = np.asarray(yf)
    # reversed bone colormap: later fill times plot darker
    cm = plt.get_cmap('bone_r')
    sc = ax.scatter(xfp, yfp, c = tf, vmin=tmin, vmax=tmax, s = 300, cmap=cm)
    plt.colorbar(sc)
    plt.savefig('sleipner_2d.png')
    #plt.show()
def get_boundary_zs(self, i, j):
    """Return (ztop, zbot): elevations bounding the permeable interval
    of column (i, j), detected as the last transitions of perm across
    the threshold 1 while scanning k upward.

    Fix: the original left ztop/zbot unbound (UnboundLocalError) when a
    transition was missing; they are now initialized to None so callers
    can detect the missing-boundary case explicitly.
    """
    ztop = None
    zbot = None
    for k in range(1, self.nz):
        key_prev = (i, j, k-1)
        key_curr = (i, j, k)
        if self.perm[key_prev] < 1. and self.perm[key_curr] > 1.:
            # entering the permeable interval
            ztop = self.z[key_curr]
        elif self.perm[key_prev] > 1. and self.perm[key_curr] < 1.:
            # leaving the permeable interval
            zbot = self.z[key_prev]
    return ztop, zbot
def get_thick_contact(self, i, j, time_index):
    """CO2 column thickness and gas-water contact elevation at plan
    position (i, j), using only fill steps before *time_index*.

    Returns (0., -812.) for an empty column; the contact is clamped so
    it never falls below -812 m.
    """
    zs = sorted(self.z[key]
                for key in self.fill_steps[:time_index]
                if key[0] == i and key[1] == j)
    if not zs:
        return 0., -812.
    # +0.52 -- presumably a half-cell-height pad; TODO confirm
    thickness = zs[-1] - zs[0] + 0.52
    contact = max(zs[0], -812.)
    return thickness, contact
def plot_3d(self, uniform_grid = True):
    """3-D scatter of the fill history, shaded by fill time.

    NOTE(review): plt.show() is commented out and nothing is saved, so
    the figure is built but not displayed by this method.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection = '3d')
    if uniform_grid == True:
        pts = []  # NOTE(review): unused local
        xs = []
        ys = []
        zs = []
        # only the eight domain corner cells are drawn as the "grid"
        for i in [0, self.nx-1]:
            for j in [0, self.ny-1]:
                for k in [0, self.nz-1]:
                    key = (i, j, k)
                    xs.append(self.x[key])
                    ys.append(self.y[key])
                    zs.append(self.z[key])
        xp = np.asarray(xs)
        yp = np.asarray(ys)
        zp = np.asarray(zs)
        ax.scatter(xp, yp, zp, s=30, c='w', marker='s')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    xf = []
    yf = []
    zf = []
    tf = []
    tmin = self.fill_times[0]
    tmax = self.fill_times[-1]
    for i in range(0, len(self.fill_steps)):
        key = self.fill_steps[i]
        xf.append(self.x[key])
        yf.append(self.y[key])
        zf.append(self.z[key])
        tf.append(self.fill_times[i])
    xfp = np.asarray(xf)
    yfp = np.asarray(yf)
    zfp = np.asarray(zf)
    # reversed bone colormap: later fill times plot darker
    cm = plt.get_cmap('bone_r')
    sc = ax.scatter(xfp, yfp, zfp, \
            c = tf, vmin=tmin, vmax=tmax, s = 300, cmap=cm)
    plt.colorbar(sc)
    #plt.show()
    return 0
def make_sleipner_csv(self):
    """Read the Eclipse grid and dump per-cell data to 'sl_data.csv'
    (columns: i, j, k, volume, x, y, z, poro, perm).

    NOTE(review): 're' here is an Eclipse-reader module defined
    elsewhere in this file (it shadows the stdlib regex module name) --
    confirm against the module imports.
    """
    e_cells, nx, ny, nz = re.read_eclipse()
    f = open('sl_data.csv', 'w')
    for i in range(nx):
        for j in range(ny):
            for k in range(nz):
                key = (i, j, k)
                ind = self.e_cell_index(i, j, k)
                oc = e_cells[ind].getCorners()
                corners = []
                for c in oc:
                    x, y = c.getXY()
                    # FLIPPING ALL ZS IN THIS.
                    z = - c.getZ()
                    nc = self.Corner(x, y, z)
                    corners.append(nc)
                # cache corners for later geometry queries
                self.corners[key] = corners
                x = self.get_x_centroid(corners)
                y = self.get_y_centroid(corners)
                z = self.get_z_centroid(corners)
                poro = e_cells[ind].getPorosity()
                perm = e_cells[ind].getXPermeability()
                volume = self.get_volume(x, y, z, corners)
                vol_s = str(volume)
                x_s = str(x)
                y_s = str(y)
                z_s = str(z)
                poro_s = str(poro)
                perm_s = str(perm)
                f.write(', '.join([str(i), str(j), str(k), \
                        vol_s, x_s, y_s, z_s, poro_s, perm_s]))
                f.write('\n')
    f.close()
    return 0
def read_sleipner_csv(self):
    """Read 'sl_data.csv' (written by make_sleipner_csv) and return
    three dicts keyed by (i, j, k):

    vol_dict      -> cell volume
    xyz_dict      -> (x, y, z) centroid
    poroperm_dict -> (porosity, permeability)

    Fix: the explicit ``csvfile.close()`` inside the ``with`` block was
    redundant (the context manager already closes the file) and is
    removed. Binary 'rb' mode is kept: this file targets Python 2,
    where the csv module expects binary-mode files.
    """
    vol_dict = {}
    xyz_dict = {}
    poroperm_dict = {}
    with open('sl_data.csv', 'rb') as csvfile:
        rd = csv.reader(csvfile, delimiter = ',')
        for row in rd:
            key = (int(row[0]), int(row[1]), int(row[2]))
            vol_dict[key] = float(row[3])
            xyz_dict[key] = (float(row[4]), float(row[5]), float(row[6]))
            poroperm_dict[key] = (float(row[7]), float(row[8]))
    return vol_dict, xyz_dict, poroperm_dict
def e_cell_index(self, i, j, k):
    """Flatten grid indices (i, j, k) into the Eclipse linear cell
    index, with i varying fastest (nx = 65, ny = 119 hard-coded)."""
    nx, ny = 65, 119
    return i + j * nx + k * nx * ny
def get_x_centroid(self, corners):
    """Arithmetic mean of the corners' x coordinates."""
    total = 0.
    for corner in corners:
        total += corner.get_x()
    # float divisor preserves true division under Python 2 as well
    return total / float(len(corners))
def get_y_centroid(self, corners):
    """Arithmetic mean of the corners' y coordinates."""
    total = 0.
    for corner in corners:
        total += corner.get_y()
    # float divisor preserves true division under Python 2 as well
    return total / float(len(corners))
def get_z_centroid(self, corners):
    """Arithmetic mean of the corners' z coordinates."""
    total = 0.
    for corner in corners:
        total += corner.get_z()
    # float divisor preserves true division under Python 2 as well
    return total / float(len(corners))
def get_dx(self, eleme, direc):
    """ returns the length of a grid cell in a particular direction.
    dir is either 1, 2 or 3 for x, y and z directions.
    i, j and k are the indices
    """
    if direc == 1 :
        # x extent from the difference of two adjacent corner x's
        corners = self.corners[eleme]
        dx = corners[0].get_x() - corners[1].get_x()
        return dx
    elif direc == 2 :
        corners = self.corners[eleme]
        dy = corners[0].get_y() - corners[2].get_y()
        return dy
    elif direc == 3 :
        # NOTE(review): e_cells, i, j and k are not defined in this
        # scope -- this branch raises NameError if ever reached; the
        # Eclipse cell list and indices need to be passed in or stored
        # on self.
        z1 = abs(e_cells[self.e_cell_index(i,j,k)].getTopZ() - \
                e_cells[self.e_cell_index(i,j,k)].getBottomZ())
        return z1
    else:
        raise Exception("Invalid direction, \n" + \
                " Please specify 1, 2 or 3.\n")
def get_volume(self, x, y, z, corners):
    """Volume of the (possibly non-planar) hexahedral cell via the
    divergence theorem for an orientable polyhedron:
    V = 1/3 * sum_f (c_f . n_f) * A_f,
    where c_f is the face-center vector relative to the cell centroid
    (x, y, z), n_f the outward face normal, and A_f the face area.
    """
    total = 0.0
    for face in ('west', 'south', 'east', 'north', 'bot', 'top'):
        area = self.get_area(corners, face)
        center = np.asarray(self.get_face_center(x, y, z, corners, face))
        normal = self.get_normal_vector(x, y, z, corners, face)
        total += np.dot(center, normal) * area
    return 1./3. * total
def get_area(self, corners, face):
    """ returns the area of a cell face, east, west, etc

    Lateral faces (west/south/east/north) are treated as trapezoids via
    get_area_side; top/bot areas are the projected plan-view rectangle
    scaled by the slope factor sqrt(cx^2 + cy^2 + 1) of a least-squares
    plane fit. Corner numbering: 0..3 top, 4..7 bottom -- presumably;
    TODO confirm against the Corner/Eclipse ordering.
    """
    if face == 'west':
        x1 = corners[2].get_y()
        x2 = corners[0].get_y()
        y1 = corners[2].get_z()
        y2 = corners[0].get_z()
        y3 = corners[6].get_z()
        y4 = corners[4].get_z()
        # negated: corner traversal runs opposite to get_area_side's convention
        area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
    elif face == 'south':
        x1 = corners[2].get_x()
        x2 = corners[3].get_x()
        y1 = corners[2].get_z()
        y2 = corners[3].get_z()
        y3 = corners[6].get_z()
        y4 = corners[7].get_z()
        area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
    elif face == 'east':
        x1 = corners[3].get_y()
        x2 = corners[1].get_y()
        y1 = corners[3].get_z()
        y2 = corners[1].get_z()
        y3 = corners[7].get_z()
        y4 = corners[5].get_z()
        area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
    elif face == 'north':
        x1 = corners[0].get_x()
        x2 = corners[1].get_x()
        y1 = corners[0].get_z()
        y2 = corners[1].get_z()
        y3 = corners[4].get_z()
        y4 = corners[5].get_z()
        area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
    elif face == 'bot':
        nc = [corners[6], corners[7], corners[4], corners[5]]
        c, resid, rank, sigma = self.fit_plane(nc)
        # slope factor of the fitted plane z = c0*x + c1*y + c2
        mag = np.sqrt(pow(c[0],2.) + pow(c[1],2.) + 1)
        x1 = corners[2].get_x()
        x2 = corners[3].get_x()
        y1 = corners[2].get_y()
        y2 = corners[0].get_y()
        area = mag * ((x2 * y2 - x1 * y2) - (x2 * y1 - x1 * y1))
    elif face == 'top':
        nc = [corners[2], corners[3], corners[0], corners[1]]
        c, resid, rank, sigma = self.fit_plane(nc)
        mag = np.sqrt(pow(c[0],2.) + pow(c[1],2.) + 1)
        x1 = corners[6].get_x()
        x2 = corners[7].get_x()
        y1 = corners[6].get_y()
        y2 = corners[4].get_y()
        area = mag * ((x2 * y2 - x1 * y2) - (x2 * y1 - x1 * y1))
    else:
        raise Exception("Invalid Face, please specify" + \
                "one of the six faces in face_map \n\n")
    return area
def get_face_center(self, xc, yc, zc, corners, face):
    """ center vector location relative to polyhedron center

    Returns [xf - xc, yf - yc, zf - zc] where (xf, yf, zf) is the
    centroid of the four corners making up *face*.

    Fix: removed a dead local (``a = 2``) in the 'south' branch and
    collapsed the six copy-pasted branches into a corner-index table;
    the corner selections and ordering are identical to the original.
    """
    # four corner indices per face, in the original traversal order
    face_corner_ids = {
        'west': (0, 2, 4, 6),
        'south': (2, 3, 6, 7),
        'east': (3, 1, 7, 5),
        'north': (0, 1, 4, 5),
        'bot': (6, 7, 4, 5),
        'top': (2, 3, 0, 1),
    }
    if face not in face_corner_ids:
        raise Exception("Invalid Face, please specify" + \
                "one of the six faces in face_map \n\n")
    nc = [corners[idx] for idx in face_corner_ids[face]]
    xf = self.get_x_centroid(nc)
    yf = self.get_y_centroid(nc)
    zf = self.get_z_centroid(nc)
    return [xf - xc, yf - yc, zf - zc]
def get_normal_vector(self, x, y, z, corners, face):
    """Outward unit normal of a cell face.

    Lateral faces are taken as axis-aligned; the top/bottom normals come
    from a least-squares plane fit (z = c0*x + c1*y + c2) through the
    four face corners.
    """
    axis_aligned = {
        'west': [-1., 0., 0.],
        'south': [0., -1., 0.],
        'east': [1., 0., 0.],
        'north': [0., 1., 0.],
    }
    if face in axis_aligned:
        return axis_aligned[face]
    if face == 'bot':
        nc = [corners[6], corners[7], corners[4], corners[5]]
        c, resid, rank, sigma = self.fit_plane(nc)
        mag = np.sqrt(pow(c[0], 2.) + pow(c[1],2.) + 1)
        # downward-pointing normal of the fitted plane
        return [c[0]/mag, c[1]/mag, -1./mag]
    if face == 'top':
        nc = [corners[2], corners[3], corners[0], corners[1]]
        c, resid, rank, sigma = self.fit_plane(nc)
        mag = np.sqrt(pow(c[0], 2.) + pow(c[1],2.) + 1)
        # upward-pointing normal of the fitted plane
        return [-c[0]/mag, -c[1]/mag, 1./mag]
    raise Exception("Invalid Face, please specify" + \
            "one of the six faces in face_map \n\n")
def fit_plane(self, corners):
    """Least-squares plane through the corner points.

    Returns the full numpy lstsq tuple (c, residuals, rank, singular
    values) with the plane in the form z = c[0]*x + c[1]*y + c[2].
    """
    xs = np.asarray([c.get_x() for c in corners])
    ys = np.asarray([c.get_y() for c in corners])
    zs = np.asarray([c.get_z() for c in corners])
    # design matrix columns: x, y, constant term
    design = np.column_stack((xs, ys, np.ones(xs.size)))
    return np.linalg.lstsq(design, zs)
def get_area_side(self, x1, x2, y1, y2, y3, y4):
    """Trapezoid area of a lateral face: height (x2 - x1), parallel
    side lengths (y4 - y2) and (y3 - y1)."""
    return 0.5 * (x2 - x1) * ((y4 - y2) + (y3 - y1))
def fail(msg):
    '''Print *msg* to stderr and terminate the process with exit
    status 1.

    Fix: the original used the Python-2-only ``print >> sys.stderr``
    syntax; ``sys.stderr.write`` behaves identically for a single
    argument and works under both Python 2 and 3.
    '''
    sys.stderr.write(str(msg) + '\n')
    sys.exit(1)
| mit |
Eric89GXL/mne-python | mne/viz/_brain/tests/test_brain.py | 2 | 33467 | # -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
#
# License: Simplified BSD
import os
import os.path as path
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from mne import (read_source_estimate, read_evokeds, read_cov,
read_forward_solution, pick_types_forward,
SourceEstimate, MixedSourceEstimate,
VolSourceEstimate)
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.source_space import (read_source_spaces, vertex_to_mni,
setup_volume_source_space)
from mne.datasets import testing
from mne.utils import check_version
from mne.label import read_label
from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh
from mne.viz._brain.colormap import calculate_lut
from matplotlib import cm, image
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
# Location of the MNE testing dataset (empty string when not downloaded;
# tests are then skipped via @testing.requires_testing_data).
data_path = testing.data_path(download=False)
subject_id = 'sample'
subjects_dir = path.join(data_path, 'subjects')
# Fixture files used throughout the tests: source estimate, label,
# noise covariance, evoked response, forward solution and source space.
fname_stc = path.join(data_path, 'MEG/sample/sample_audvis_trunc-meg')
fname_label = path.join(data_path, 'MEG/sample/labels/Vis-lh.label')
fname_cov = path.join(
    data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_evoked = path.join(data_path, 'MEG', 'sample',
                         'sample_audvis_trunc-ave.fif')
fname_fwd = path.join(
    data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
src_fname = path.join(data_path, 'subjects', 'sample', 'bem',
                      'sample-oct-6-src.fif')
class _Collection(object):
def __init__(self, actors):
self._actors = actors
def GetNumberOfItems(self):
return len(self._actors)
def GetItemAsObject(self, ii):
return self._actors[ii]
class TstVTKPicker(object):
    """Class to test cell picking."""

    def __init__(self, mesh, cell_id, hemi, brain):
        self.mesh = mesh
        self.cell_id = cell_id
        self.point_id = None  # filled in by GetPickPosition
        self.hemi = hemi
        self.brain = brain
        self._actors = ()

    def GetCellId(self):
        """Return the picked cell."""
        return self.cell_id

    def GetDataSet(self):
        """Return the picked mesh."""
        return self.mesh

    def GetPickPosition(self):
        """Return the picked position."""
        if self.hemi == 'vol':
            # volume picks index straight into the brain's grid coords
            self.point_id = self.cell_id
            return self.brain._data['vol']['grid_coords'][self.cell_id]
        vtk_cell = self.mesh.GetCell(self.cell_id)
        point_ids = [vtk_cell.GetPointId(idx)
                     for idx in range(vtk_cell.GetNumberOfPoints())]
        # first vertex of the cell stands in for the pick location
        self.point_id = point_ids[0]
        return self.mesh.points[self.point_id]

    def GetProp3Ds(self):
        """Return all picked Prop3Ds."""
        return _Collection(self._actors)

    def GetRenderer(self):
        """Return the "renderer"."""
        return self  # set this to also be the renderer and active camera

    GetActiveCamera = GetRenderer

    def GetPosition(self):
        """Return the position."""
        return np.array(self.GetPickPosition()) - (0, 0, 100)
def test_layered_mesh(renderer_interactive):
    """Test management of scalars/colormap overlay."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    # two triangles sharing an edge, all normals pointing up
    lm = _LayeredMesh(
        renderer=renderer_interactive._get_renderer(size=[300, 300]),
        vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]),
        triangles=np.array([[0, 1, 2], [1, 2, 3]]),
        normals=np.array([[0, 0, 1]] * 4),
    )
    # mapping is lazy: nothing mapped until map() is called explicitly
    assert not lm._is_mapped
    lm.map()
    assert lm._is_mapped
    assert lm._cache is None
    lm.update()
    assert len(lm._overlays) == 0
    # adding an overlay populates both the cache and the registry
    lm.add_overlay(
        scalars=np.array([0, 1, 1, 0]),
        colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
        rng=None,
        opacity=None,
        name='test',
    )
    assert lm._cache is not None
    assert len(lm._overlays) == 1
    assert 'test' in lm._overlays
    # removing the overlay empties the registry again
    lm.remove_overlay('test')
    assert len(lm._overlays) == 0
    lm._clean()
@testing.requires_testing_data
def test_brain_gc(renderer, brain_gc):
    """Test that a minimal version of Brain gets GC'ed."""
    # the brain_gc fixture verifies collection after the test body runs
    backend = renderer._get_3d_backend()
    if backend != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    b = Brain('fsaverage', 'both', 'inflated', subjects_dir=subjects_dir)
    b.close()
@testing.requires_testing_data
def test_brain_init(renderer, tmpdir, pixel_ratio, brain_gc):
    """Test initialization of the Brain instance."""
    if renderer._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    from mne.source_estimate import _BaseSourceEstimate

    # minimal STC subclass used only to trigger the type check below
    class FakeSTC(_BaseSourceEstimate):
        def __init__(self):
            pass
    hemi = 'lh'
    surf = 'inflated'
    cortex = 'low_contrast'
    title = 'test'
    size = (300, 300)
    kwargs = dict(subject_id=subject_id, subjects_dir=subjects_dir)
    # constructor argument validation
    with pytest.raises(ValueError, match='"size" parameter must be'):
        Brain(hemi=hemi, surf=surf, size=[1, 2, 3], **kwargs)
    with pytest.raises(KeyError):
        Brain(hemi='foo', surf=surf, **kwargs)
    with pytest.raises(TypeError, match='figure'):
        Brain(hemi=hemi, surf=surf, figure='foo', **kwargs)
    with pytest.raises(TypeError, match='interaction'):
        Brain(hemi=hemi, surf=surf, interaction=0, **kwargs)
    with pytest.raises(ValueError, match='interaction'):
        Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs)
    renderer.backend._close_all()
    brain = Brain(hemi=hemi, surf=surf, size=size, title=title,
                  cortex=cortex, units='m', **kwargs)
    with pytest.raises(TypeError, match='not supported'):
        brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None)
    with pytest.raises(ValueError, match='add_data'):
        brain.setup_time_viewer(time_viewer=True)
    brain._hemi = 'foo'  # for testing: hemis
    with pytest.raises(ValueError, match='not be None'):
        brain._check_hemi(hemi=None)
    with pytest.raises(ValueError, match='either "lh" or "rh"'):
        brain._check_hemi(hemi='foo')
    with pytest.raises(ValueError, match='either "lh" or "rh"'):
        brain._check_hemis(hemi='foo')
    brain._hemi = hemi  # end testing: hemis
    with pytest.raises(ValueError, match='bool or positive'):
        brain._to_borders(None, None, 'foo')
    assert brain.interaction == 'trackball'
    # add_data
    stc = read_source_estimate(fname_stc)
    fmin = stc.data.min()
    fmax = stc.data.max()
    for h in brain._hemis:
        # pick the vertex slot for this hemisphere (0=lh, 1=rh)
        if h == 'lh':
            hi = 0
        else:
            hi = 1
        hemi_data = stc.data[:len(stc.vertices[hi]), 10]
        hemi_vertices = stc.vertices[hi]
        # add_data argument validation
        with pytest.raises(TypeError, match='scale_factor'):
            brain.add_data(hemi_data, hemi=h, scale_factor='foo')
        with pytest.raises(TypeError, match='vector_alpha'):
            brain.add_data(hemi_data, hemi=h, vector_alpha='foo')
        with pytest.raises(ValueError, match='thresh'):
            brain.add_data(hemi_data, hemi=h, thresh=-1)
        with pytest.raises(ValueError, match='remove_existing'):
            brain.add_data(hemi_data, hemi=h, remove_existing=-1)
        with pytest.raises(ValueError, match='time_label_size'):
            brain.add_data(hemi_data, hemi=h, time_label_size=-1,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='is positive'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps=-1,
                           vertices=hemi_vertices)
        with pytest.raises(TypeError, match='int or NoneType'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps='foo')
        with pytest.raises(ValueError, match='dimension mismatch'):
            brain.add_data(array=np.array([0, 1, 2]), hemi=h,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='vertices parameter must not be'):
            brain.add_data(hemi_data, fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None)
        with pytest.raises(ValueError, match='has shape'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None, time=[0, 1])
        # valid static (no-time) add_data
        brain.add_data(hemi_data, fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps='nearest', colorbar=(0, 0), time=None)
        with pytest.raises(ValueError, match='brain has no defined times'):
            brain.set_time(0.)
        assert brain.data['lh']['array'] is hemi_data
        assert brain.views == ['lateral']
        assert brain.hemis == ('lh',)
        # valid time-resolved add_data (single time point)
        brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps=1, initial_time=0., colorbar=False,
                       time=[0])
        with pytest.raises(ValueError, match='the range of available times'):
            brain.set_time(7.)
        brain.set_time(0.)
        brain.set_time_point(0)  # should hit _safe_interp1d
        with pytest.raises(ValueError, match='consistent with'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h,
                           fmax=fmax, colormap='hot', vertices=hemi_vertices,
                           smoothing_steps='nearest', colorbar=False,
                           time=[1])
        with pytest.raises(ValueError, match='different from'):
            brain.add_data(hemi_data[:, np.newaxis][:, [0, 0]],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='need shape'):
            brain.add_data(hemi_data[:, np.newaxis], time=[0, 1],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='If array has 3'):
            brain.add_data(hemi_data[:, np.newaxis, np.newaxis],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
    # add label
    label = read_label(fname_label)
    with pytest.raises(ValueError, match="not a filename"):
        brain.add_label(0)
    with pytest.raises(ValueError, match="does not exist"):
        brain.add_label('foo', subdir='bar')
    label.name = None  # test unnamed label
    brain.add_label(label, scalar_thresh=0.)
    assert isinstance(brain.labels[label.hemi], list)
    assert 'unnamed' in brain._layered_meshes[label.hemi]._overlays
    brain.remove_labels()
    brain.add_label(fname_label)
    brain.add_label('V1', borders=True)
    brain.remove_labels()
    brain.remove_labels()
    # add foci
    brain.add_foci([0], coords_as_verts=True,
                   hemi=hemi, color='blue')
    # add text
    brain.add_text(x=0, y=0, text='foo')
    brain.close()
    # add annotation
    annots = ['aparc', path.join(subjects_dir, 'fsaverage', 'label',
                                 'lh.PALS_B12_Lobes.annot')]
    borders = [True, 2]
    alphas = [1, 0.5]
    colors = [None, 'r']
    brain = Brain(subject_id='fsaverage', hemi='both', size=size,
                  surf='inflated', subjects_dir=subjects_dir)
    with pytest.raises(RuntimeError, match="both hemispheres"):
        brain.add_annotation(annots[-1])
    with pytest.raises(ValueError, match="does not exist"):
        brain.add_annotation('foo')
    brain.close()
    brain = Brain(subject_id='fsaverage', hemi=hemi, size=size,
                  surf='inflated', subjects_dir=subjects_dir)
    for a, b, p, color in zip(annots, borders, alphas, colors):
        brain.add_annotation(a, b, p, color=color)
    brain.show_view(dict(focalpoint=(1e-5, 1e-5, 1e-5)), roll=1, distance=500)
    # image and screenshot
    fname = path.join(str(tmpdir), 'test.png')
    assert not path.isfile(fname)
    brain.save_image(fname)
    assert path.isfile(fname)
    brain.show_view(view=dict(azimuth=180., elevation=90.))
    img = brain.screenshot(mode='rgb')
    if renderer._get_3d_backend() == 'mayavi':
        pixel_ratio = 1.  # no HiDPI when using the testing backend
    want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 3])
    assert_allclose(img.shape, want_size)
    brain.close()
@testing.requires_testing_data
@pytest.mark.skipif(os.getenv('CI_OS_NAME', '') == 'osx',
                    reason='Unreliable/segfault on macOS CI')
@pytest.mark.parametrize('hemi', ('lh', 'rh'))
def test_single_hemi(hemi, renderer_interactive, brain_gc):
    """Test single hemi support."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    stc = read_source_estimate(fname_stc)
    # vertex slot and list ordering for the requested hemisphere
    if hemi == 'lh':
        idx, order = 0, 1
    else:
        idx, order = 1, -1
    vertices = [stc.vertices[idx], []][::order]
    stc = SourceEstimate(getattr(stc, f'{hemi}_data'), vertices,
                         0, 1, 'sample')
    brain = stc.plot(
        subjects_dir=subjects_dir, hemi='both', size=300)
    brain.close()
    # test skipping when len(vertices) == 0
    stc.vertices[1 - idx] = np.array([])
    brain = stc.plot(
        subjects_dir=subjects_dir, hemi=hemi, size=300)
    brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_save_movie(tmpdir, renderer, brain_gc):
    """Test saving a movie of a Brain instance."""
    if renderer._get_3d_backend() == "mayavi":
        pytest.skip('Save movie only supported on PyVista')
    brain = _create_testing_brain(hemi='lh', time_viewer=False)
    filename = str(path.join(tmpdir, "brain_test.mov"))
    # run once with interactivity off and once with it on, for coverage
    for interactive in (False, True):
        plotter = brain._renderer.plotter
        if interactive:
            plotter.enable()
        else:
            plotter.disable()
        # unknown keyword must fail before any file is written
        with pytest.raises(TypeError, match='unexpected keyword argument'):
            brain.save_movie(filename, time_dilation=1, tmin=1, tmax=1.1,
                             bad_name='blah')
        assert not path.isfile(filename)
        brain.save_movie(filename, time_dilation=0.1,
                         interpolation='nearest')
        assert path.isfile(filename)
        os.remove(filename)
    brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc):
    """Test time viewer primitives."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    # constructor validation for time-viewer-specific options
    with pytest.raises(ValueError, match="between 0 and 1"):
        _create_testing_brain(hemi='lh', show_traces=-1.0)
    with pytest.raises(ValueError, match="got unknown keys"):
        _create_testing_brain(hemi='lh', surf='white', src='volume',
                              volume_options={'foo': 'bar'})
    brain = _create_testing_brain(hemi='both', show_traces=False)
    # test sub routines when show_traces=False
    brain._on_pick(None, None)
    brain._configure_vertex_time_course()
    brain._configure_label_time_course()
    brain.setup_time_viewer()  # for coverage
    # exercise the registered UI callbacks directly
    brain.callbacks["time"](value=0)
    brain.callbacks["orientation_lh_0_0"](
        value='lat',
        update_widget=True
    )
    brain.callbacks["orientation_lh_0_0"](
        value='medial',
        update_widget=True
    )
    brain.callbacks["time"](
        value=0.0,
        time_as_index=False,
    )
    brain.callbacks["smoothing"](value=1)
    # colormap limits, including deliberately inconsistent orderings
    brain.callbacks["fmin"](value=12.0)
    brain.callbacks["fmax"](value=4.0)
    brain.callbacks["fmid"](value=6.0)
    brain.callbacks["fmid"](value=4.0)
    brain.callbacks["fscale"](value=1.1)
    brain.callbacks["fmin"](value=12.0)
    brain.callbacks["fmid"](value=4.0)
    # UI toggles and playback controls
    brain.toggle_interface()
    brain.toggle_interface(value=False)
    brain.callbacks["playback_speed"](value=0.1)
    brain.toggle_playback()
    brain.toggle_playback(value=False)
    brain.apply_auto_scaling()
    brain.restore_user_scaling()
    brain.reset()
    # help() should open exactly one matplotlib figure
    plt.close('all')
    brain.help()
    assert len(plt.get_fignums()) == 1
    plt.close('all')
    assert len(plt.get_fignums()) == 0
    # screenshot
    brain.show_view(view=dict(azimuth=180., elevation=90.))
    img = brain.screenshot(mode='rgb')
    want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
    assert_allclose(img.shape, want_shape)
    brain.close()
@testing.requires_testing_data
@pytest.mark.parametrize('hemi', [
    'lh',
    pytest.param('rh', marks=pytest.mark.slowtest),
    pytest.param('split', marks=pytest.mark.slowtest),
    pytest.param('both', marks=pytest.mark.slowtest),
])
@pytest.mark.parametrize('src', [
    'surface',
    pytest.param('vector', marks=pytest.mark.slowtest),
    pytest.param('volume', marks=pytest.mark.slowtest),
    pytest.param('mixed', marks=pytest.mark.slowtest),
])
@pytest.mark.slowtest
def test_brain_traces(renderer_interactive, hemi, src, tmpdir,
                      brain_gc):
    """Test brain time-course traces (label mode, vertex mode, picking).

    Exercises interactive picking of mesh cells, the label/vertex trace
    widgets, colormap selection for diverging data, screenshots, and the
    sphinx-gallery scraper integration.
    """
    # Traces are only implemented for the PyVista 3D backend.
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('Only PyVista supports traces')
    # Build the list of hemis that will actually carry data for this src.
    hemi_str = list()
    if src in ('surface', 'vector', 'mixed'):
        hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh'])
    if src in ('mixed', 'volume'):
        hemi_str.extend(['vol'])

    # label traces
    brain = _create_testing_brain(
        hemi=hemi, surf='white', src=src, show_traces='label',
        volume_options=None,  # for speed, don't upsample
        n_time=5, initial_time=0,
    )
    if src == 'surface':
        brain._data['src'] = None  # test src=None
    if src in ('surface', 'vector', 'mixed'):
        assert brain.show_traces
        assert brain.traces_mode == 'label'
        brain._label_mode_widget.setCurrentText('max')

        # test picking a cell at random
        rng = np.random.RandomState(0)
        for idx, current_hemi in enumerate(hemi_str):
            if current_hemi == 'vol':
                continue  # label picking only applies to surface hemis
            current_mesh = brain._layered_meshes[current_hemi]._polydata
            cell_id = rng.randint(0, current_mesh.n_cells)
            test_picker = TstVTKPicker(
                current_mesh, cell_id, current_hemi, brain)
            assert len(brain.picked_patches[current_hemi]) == 0
            brain._on_pick(test_picker, None)
            assert len(brain.picked_patches[current_hemi]) == 1
            for label_id in list(brain.picked_patches[current_hemi]):
                label = brain._annotation_labels[current_hemi][label_id]
                assert isinstance(label._line, Line2D)
            brain._label_mode_widget.setCurrentText('mean')
            brain.clear_glyphs()
            assert len(brain.picked_patches[current_hemi]) == 0
            brain._on_pick(test_picker, None)  # picked and added
            assert len(brain.picked_patches[current_hemi]) == 1
            brain._on_pick(test_picker, None)  # picked again so removed
            assert len(brain.picked_patches[current_hemi]) == 0
        # test switching from 'label' to 'vertex'
        brain._annot_cands_widget.setCurrentText('None')
        brain._label_mode_widget.setCurrentText('max')
    else:  # volume
        # pure-volume sources have no label-trace widgets at all
        assert brain._trace_mode_widget is None
        assert brain._annot_cands_widget is None
        assert brain._label_mode_widget is None
    brain.close()

    # test colormap
    if src != 'vector':
        brain = _create_testing_brain(
            hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0,
            volume_options=None,  # for speed, don't upsample
            n_time=1 if src == 'mixed' else 5, diverging=True,
            add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
        )
        # mne_analyze should be chosen
        ctab = brain._data['ctable']
        assert_array_equal(ctab[0], [0, 255, 255, 255])  # opaque cyan
        assert_array_equal(ctab[-1], [255, 255, 0, 255])  # opaque yellow
        assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3)
        brain.close()

    # vertex traces
    brain = _create_testing_brain(
        hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0,
        volume_options=None,  # for speed, don't upsample
        n_time=1 if src == 'mixed' else 5,
        add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
    )
    assert brain.show_traces
    assert brain.traces_mode == 'vertex'
    assert hasattr(brain, "picked_points")
    assert hasattr(brain, "_spheres")
    assert brain.plotter.scalar_bar.GetNumberOfLabels() == 3

    # add foci should work for volumes
    brain.add_foci([[0, 0, 0]], hemi='lh' if src == 'surface' else 'vol')

    # test points picked by default
    picked_points = brain.get_picked_points()
    spheres = brain._spheres
    for current_hemi in hemi_str:
        assert len(picked_points[current_hemi]) == 1
    n_spheres = len(hemi_str)
    if hemi == 'split' and src in ('mixed', 'volume'):
        # NOTE: split view presumably adds one extra sphere for the second
        # surface view of the same vertex
        n_spheres += 1
    assert len(spheres) == n_spheres

    # test switching from 'vertex' to 'label'
    if src == 'surface':
        brain._annot_cands_widget.setCurrentText('aparc')
        brain._annot_cands_widget.setCurrentText('None')

    # test removing points
    brain.clear_glyphs()
    assert len(spheres) == 0
    for key in ('lh', 'rh', 'vol'):
        assert len(picked_points[key]) == 0

    # test picking a cell at random
    rng = np.random.RandomState(0)
    for idx, current_hemi in enumerate(hemi_str):
        assert len(spheres) == 0
        if current_hemi == 'vol':
            # pick the voxel with the largest absolute value so the trace
            # is guaranteed to be non-trivial
            current_mesh = brain._data['vol']['grid']
            vertices = brain._data['vol']['vertices']
            values = current_mesh.cell_arrays['values'][vertices]
            cell_id = vertices[np.argmax(np.abs(values))]
        else:
            current_mesh = brain._layered_meshes[current_hemi]._polydata
            cell_id = rng.randint(0, current_mesh.n_cells)
        # a picker with no mesh should be a no-op
        test_picker = TstVTKPicker(None, None, current_hemi, brain)
        assert brain._on_pick(test_picker, None) is None
        test_picker = TstVTKPicker(
            current_mesh, cell_id, current_hemi, brain)
        assert cell_id == test_picker.cell_id
        assert test_picker.point_id is None
        brain._on_pick(test_picker, None)
        brain._on_pick(test_picker, None)
        assert test_picker.point_id is not None
        assert len(picked_points[current_hemi]) == 1
        assert picked_points[current_hemi][0] == test_picker.point_id
        assert len(spheres) > 0
        sphere = spheres[-1]
        vertex_id = sphere._vertex_id
        assert vertex_id == test_picker.point_id
        line = sphere._line

        hemi_prefix = current_hemi[0].upper()
        if current_hemi == 'vol':
            assert hemi_prefix + ':' in line.get_label()
            assert 'MNI' in line.get_label()
            continue  # the MNI conversion is more complex
        hemi_int = 0 if current_hemi == 'lh' else 1
        mni = vertex_to_mni(
            vertices=vertex_id,
            hemis=hemi_int,
            subject=brain._subject_id,
            subjects_dir=brain._subjects_dir
        )
        label = "{}:{} MNI: {}".format(
            hemi_prefix, str(vertex_id).ljust(6),
            ', '.join('%5.1f' % m for m in mni))
        assert line.get_label() == label

        # remove the sphere by clicking in its vicinity
        old_len = len(spheres)
        test_picker._actors = sum((s._actors for s in spheres), [])
        brain._on_pick(test_picker, None)
        assert len(spheres) < old_len

    screenshot = brain.screenshot()
    screenshot_all = brain.screenshot(time_viewer=True)
    assert screenshot.shape[0] < screenshot_all.shape[0]
    # and the scraper for it (will close the instance)
    # only test one condition to save time
    if not (hemi == 'rh' and src == 'surface' and
            check_version('sphinx_gallery')):
        brain.close()
        return
    fnames = [str(tmpdir.join(f'temp_{ii}.png')) for ii in range(2)]
    block_vars = dict(image_path_iterator=iter(fnames),
                      example_globals=dict(brain=brain))
    block = ('code', """
something
# brain.save_movie(time_dilation=1, framerate=1,
#                  interpolation='linear', time_viewer=True)
#
""", 1)
    gallery_conf = dict(src_dir=str(tmpdir), compress_images=[])
    scraper = _BrainScraper()
    rst = scraper(block, block_vars, gallery_conf)
    assert brain.plotter is None  # closed
    gif_0 = fnames[0][:-3] + 'gif'
    for fname in (gif_0, fnames[1]):
        assert path.basename(fname) in rst
        assert path.isfile(fname)
        img = image.imread(fname)
        assert img.shape[1] == screenshot.shape[1]  # same width
        assert img.shape[0] > screenshot.shape[0]  # larger height
        assert img.shape[:2] == screenshot_all.shape[:2]
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_linkviewer(renderer_interactive, brain_gc):
    """Test _LinkViewer primitives.

    Verifies that multiple Brain instances can be linked (time, camera,
    colorbar, picking) and that a RuntimeWarning is emitted when the
    instances' time axes disagree.
    """
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('Linkviewer only supported on PyVista')
    brain1 = _create_testing_brain(hemi='lh', show_traces=False)
    brain2 = _create_testing_brain(hemi='lh', show_traces='separate')
    # deliberately desynchronize the time axes to trigger the warning below
    brain1._times = brain1._times * 2
    with pytest.warns(RuntimeWarning, match='linking time'):
        link_viewer = _LinkViewer(
            [brain1, brain2],
            time=True,
            camera=False,
            colorbar=False,
            picking=False,
        )
    brain_data = _create_testing_brain(hemi='split', show_traces='vertex')
    # now link everything between two compatible brains
    link_viewer = _LinkViewer(
        [brain2, brain_data],
        time=True,
        camera=True,
        colorbar=True,
        picking=True,
    )
    # drive each linked control once for coverage
    link_viewer.set_time_point(value=0)
    link_viewer.brains[0].mpl_canvas.time_func(0)
    link_viewer.set_fmin(0)
    link_viewer.set_fmid(0.5)
    link_viewer.set_fmax(1)
    link_viewer.set_playback_speed(value=0.1)
    link_viewer.toggle_playback()
    del link_viewer
    brain1.close()
    brain2.close()
    brain_data.close()
def test_calculate_lut():
    """Test brain's colormap functions.

    Checks the shape and key color/alpha entries of the lookup tables
    produced by ``calculate_lut`` for every ordering of fmin/fmid/fmax,
    in both sequential and divergent (centered) modes.
    """
    colormap = "coolwarm"
    alpha = 1.0
    fmin = 0.0
    fmid = 0.5
    fmax = 1.0
    center = None
    # both a colormap name and a Colormap instance must be accepted
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)
    center = 0.0
    colormap = cm.get_cmap(colormap)
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)

    cmap = cm.get_cmap(colormap)
    zero_alpha = np.array([1., 1., 1., 0])
    half_alpha = np.array([1., 1., 1., 0.5])
    atol = 1.5 / 256.  # one LUT quantization step, with slack

    # fmin < fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 3)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[63], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[192], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)

    # fmin == fmid == fmax
    lut = calculate_lut(colormap, alpha, 1, 1, 1)
    zero_alpha = np.array([1., 1., 1., 0])
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 0, 0, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)

    # fmin == fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 1, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0.) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[62], cmap(0.245), atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[193], cmap(0.755), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    lut = calculate_lut(colormap, alpha, 0, 0, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[126], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[129], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)

    # fmin < fmid == fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 2, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[32], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[223], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.7475), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=2 * atol)
    lut = calculate_lut(colormap, alpha, 0, 1, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[64], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.75), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)

    # invalid ordering must raise with an informative message
    with pytest.raises(ValueError, match=r'.*fmin \(1\) <= fmid \(0\) <= fma'):
        calculate_lut(colormap, alpha, 1, 0, 2)
def _create_testing_brain(hemi, surf='inflated', src='surface', size=300,
                          n_time=5, diverging=False, **kwargs):
    """Create a Brain instance with synthetic data for testing.

    Parameters
    ----------
    hemi : str
        Hemisphere to plot ('lh', 'rh', 'split', 'both').
    surf : str
        Surface to use (e.g. 'inflated', 'white').
    src : str
        Source-space kind: 'surface', 'vector', 'mixed', or 'volume'.
    size : int
        Figure size passed to the plotting call.
    n_time : int
        Number of time points of random data to generate.
    diverging : bool
        If True, shift the data to be signed and use pos_lims for clim.
    **kwargs
        Forwarded to the stc plotting method.

    Returns
    -------
    brain : instance returned by the stc's plot/plot_3d method.
    """
    assert src in ('surface', 'vector', 'mixed', 'volume')
    meth = 'plot'
    if src in ('surface', 'mixed'):
        sample_src = read_source_spaces(src_fname)
        klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate
    if src == 'vector':
        # vector data takes a separate path: a real inverse is computed
        # from the testing dataset and plotted directly
        fwd = read_forward_solution(fname_fwd)
        fwd = pick_types_forward(fwd, meg=True, eeg=False)
        evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
        noise_cov = read_cov(fname_cov)
        free = make_inverse_operator(
            evoked.info, fwd, noise_cov, loose=1.)
        stc = apply_inverse(evoked, free, pick_ori='vector')
        return stc.plot(
            subject=subject_id, hemi=hemi, size=size,
            subjects_dir=subjects_dir, colormap='auto',
            **kwargs)
    if src in ('volume', 'mixed'):
        vol_src = setup_volume_source_space(
            subject_id, 7., mri='aseg.mgz',
            volume_label='Left-Cerebellum-Cortex',
            subjects_dir=subjects_dir, add_interpolator=False)
        assert len(vol_src) == 1
        assert vol_src[0]['nuse'] == 150
        if src == 'mixed':
            sample_src = sample_src + vol_src
        else:
            sample_src = vol_src
            klass = VolSourceEstimate
            meth = 'plot_3d'
    assert sample_src.kind == src

    # dense version: sparse random activations over all vertices/times
    rng = np.random.RandomState(0)
    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    # activate ~5% of the entries with random values
    stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \
        rng.rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    if diverging:
        stc_data -= 0.5  # center around zero so data is signed
    stc = klass(stc_data, vertices, 1, 1)

    clim = dict(kind='value', lims=[0.1, 0.2, 0.3])
    if diverging:
        clim['pos_lims'] = clim.pop('lims')
    brain_data = getattr(stc, meth)(
        subject=subject_id, hemi=hemi, surface=surf, size=size,
        subjects_dir=subjects_dir, colormap='auto',
        clim=clim, src=sample_src,
        **kwargs)
    return brain_data
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tools/merge.py | 4 | 41529 | """
SQL-style merge routines
"""
import types
import numpy as np
from pandas.compat import range, long, lrange, lzip, zip, map, filter
import pandas.compat as compat
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
from pandas.core.generic import NDFrame
from pandas.core.series import Series
from pandas.core.index import (Index, MultiIndex, _get_combined_index,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
from pandas.core.common import ABCSeries
from pandas.io.parsers import TextFileReader
import pandas.core.common as com
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True):
    # Thin public wrapper: all of the join logic lives in _MergeOperation;
    # get_result() materializes the merged DataFrame.
    operation = _MergeOperation(
        left, right, how=how, on=on, left_on=left_on, right_on=right_on,
        left_index=left_index, right_index=right_index, sort=sort,
        suffixes=suffixes, copy=copy)
    return operation.get_result()
# Interpolate the example DataFrame name into the shared merge docstring.
# Guarded by __debug__ because running under ``python -O`` strips
# docstrings, making _merge_doc formatting pointless (and __doc__ None).
if __debug__:
    merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
    """Error raised when a merge specification is invalid or ambiguous."""
    pass
def ordered_merge(left, right, on=None, left_by=None, right_by=None,
                  left_on=None, right_on=None,
                  fill_method=None, suffixes=('_x', '_y')):
    """Perform merge with optional filling/interpolation designed for ordered
    data like time series data. Optionally perform group-wise merge (see
    examples)

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    fill_method : {'ffill', None}, default None
        Interpolation method for data
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors
        per left_on docs
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively

    Examples
    --------
    >>> A                      >>> B
          key  lvalue group        key  rvalue
    0   a       1     a    0     b       1
    1   c       2     a    1     c       2
    2   e       3     a    2     d       3
    3   a       1     b
    4   c       2     b
    5   e       3     b

    >>> ordered_merge(A, B, fill_method='ffill', left_by='group')
       key  lvalue group  rvalue
    0    a       1     a     NaN
    1    b       1     a       1
    2    c       2     a       2
    3    d       2     a       3
    4    e       3     a       3
    5    f       3     a       4
    6    a       1     b     NaN
    7    b       1     b       1
    8    c       2     b       2
    9    d       2     b       3
    10   e       3     b       3
    11   f       3     b       4

    Returns
    -------
    merged : DataFrame
        The output type will the be same as 'left', if it is a subclass
        of DataFrame.

    Raises
    ------
    ValueError
        If both ``left_by`` and ``right_by`` are given.
    """
    def _merger(x, y):
        # Merge one pair of frames with the ordered-merge machinery.
        op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
                           suffixes=suffixes, fill_method=fill_method)
        return op.get_result()

    def _grouped_merge(frame, by, merge_piece):
        # Merge ``frame`` piece by piece (grouped on ``by``) and restore the
        # group key columns on each merged piece before concatenating.
        pieces = []
        for key, piece in frame.groupby(by):
            merged = merge_piece(piece)
            for k in by:
                # May have passed ndarray group keys that cannot be used as
                # column labels; best-effort only. Narrowed from a bare
                # ``except:`` so KeyboardInterrupt/SystemExit propagate.
                try:
                    if k in merged:
                        merged[k] = key
                except Exception:
                    pass
            pieces.append(merged)
        return concat(pieces, ignore_index=True)

    if left_by is not None and right_by is not None:
        raise ValueError('Can only group either left or right frames')
    elif left_by is not None:
        if not isinstance(left_by, (list, tuple)):
            left_by = [left_by]
        return _grouped_merge(left, left_by,
                              lambda piece: _merger(piece, right))
    elif right_by is not None:
        if not isinstance(right_by, (list, tuple)):
            right_by = [right_by]
        return _grouped_merge(right, right_by,
                              lambda piece: _merger(left, piece))
    else:
        return _merger(left, right)
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
    """
    Perform a database (SQL) merge operation between two DataFrame objects
    using either columns as keys or their row indexes
    """

    def __init__(self, left, right, how='inner', on=None,
                 left_on=None, right_on=None, axis=1,
                 left_index=False, right_index=False, sort=True,
                 suffixes=('_x', '_y'), copy=True):
        # keep references to the original frames; self.left/right may be
        # replaced with key-dropped copies by _get_merge_keys below
        self.left = self.orig_left = left
        self.right = self.orig_right = right
        self.how = how
        self.axis = axis

        # normalize scalar keys to lists
        self.on = com._maybe_make_list(on)
        self.left_on = com._maybe_make_list(left_on)
        self.right_on = com._maybe_make_list(right_on)

        self.copy = copy
        self.suffixes = suffixes
        self.sort = sort

        self.left_index = left_index
        self.right_index = right_index

        # note this function has side effects
        (self.left_join_keys,
         self.right_join_keys,
         self.join_names) = self._get_merge_keys()

    def get_result(self):
        """Execute the merge and return the joined frame."""
        join_index, left_indexer, right_indexer = self._get_join_info()

        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        # disambiguate overlapping column names with the suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)

        lindexers = {1: left_indexer} if left_indexer is not None else {}
        rindexers = {1: right_indexer} if right_indexer is not None else {}

        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)

        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method='merge')

        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return result

    def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
        """Fill or insert join-key columns in the result where needed."""
        # insert group keys
        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue

            if name in result:
                key_col = result[name]

                if left_indexer is not None and right_indexer is not None:
                    if name in self.left:
                        # rows missing on the left get key values taken
                        # from the right join keys
                        na_indexer = (left_indexer == -1).nonzero()[0]
                        if len(na_indexer) == 0:
                            continue

                        right_na_indexer = right_indexer.take(na_indexer)
                        key_col.put(
                            na_indexer, com.take_1d(self.right_join_keys[i],
                                                    right_na_indexer))
                    elif name in self.right:
                        # symmetric case: fill from the left join keys
                        na_indexer = (right_indexer == -1).nonzero()[0]
                        if len(na_indexer) == 0:
                            continue

                        left_na_indexer = left_indexer.take(na_indexer)
                        key_col.put(na_indexer,
                                    com.take_1d(self.left_join_keys[i],
                                                left_na_indexer))

            elif left_indexer is not None \
                    and isinstance(self.left_join_keys[i], np.ndarray):
                # key was passed as an array, not a column: synthesize a
                # column name and insert the reconstructed key values
                if name is None:
                    name = 'key_%d' % i

                # a faster way?
                key_col = com.take_1d(self.left_join_keys[i], left_indexer)
                na_indexer = (left_indexer == -1).nonzero()[0]
                right_na_indexer = right_indexer.take(na_indexer)
                key_col.put(na_indexer, com.take_1d(self.right_join_keys[i],
                                                    right_na_indexer))
                result.insert(i, name, key_col)

    def _get_join_info(self):
        """Compute (join_index, left_indexer, right_indexer) for the merge."""
        left_ax = self.left._data.axes[self.axis]
        right_ax = self.right._data.axes[self.axis]

        if self.left_index and self.right_index:
            # index-on-index join: delegate to Index.join
            join_index, left_indexer, right_indexer = \
                left_ax.join(right_ax, how=self.how, return_indexers=True)
        elif self.right_index and self.how == 'left':
            join_index, left_indexer, right_indexer = \
                _left_join_on_index(left_ax, right_ax, self.left_join_keys,
                                    sort=self.sort)

        elif self.left_index and self.how == 'right':
            # mirror of the case above with the roles swapped
            join_index, right_indexer, left_indexer = \
                _left_join_on_index(right_ax, left_ax, self.right_join_keys,
                                    sort=self.sort)
        else:
            (left_indexer,
             right_indexer) = _get_join_indexers(self.left_join_keys,
                                                 self.right_join_keys,
                                                 sort=self.sort, how=self.how)

            if self.right_index:
                join_index = self.left.index.take(left_indexer)
            elif self.left_index:
                join_index = self.right.index.take(right_indexer)
            else:
                # neither side joins on its index: use a fresh RangeIndex-like
                join_index = Index(np.arange(len(left_indexer)))

        return join_index, left_indexer, right_indexer

    def _get_merge_data(self):
        """
        Handles overlapping column names etc.
        """
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        llabels, rlabels = items_overlap_with_suffix(
            ldata.items, lsuf, rdata.items, rsuf)

        if not llabels.equals(ldata.items):
            ldata = ldata.copy(deep=False)
            ldata.set_axis(0, llabels)

        if not rlabels.equals(rdata.items):
            rdata = rdata.copy(deep=False)
            rdata.set_axis(0, rlabels)

        return ldata, rdata

    def _get_merge_keys(self):
        """
        Resolve the left/right join key arrays and their names.

        Note: has side effects (copy/delete key columns) — when the same
        column is a key on both sides, one copy is dropped from self.left
        or self.right.

        Parameters
        ----------
        left
        right
        on

        Returns
        -------
        left_keys, right_keys
        """
        self._validate_specification()

        left_keys = []
        right_keys = []
        join_names = []
        right_drop = []
        left_drop = []
        left, right = self.left, self.right

        # an array/Series of frame length counts as a key vector, not a label
        is_lkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(left)
        is_rkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(right)

        # ugh, spaghetti re #733
        if _any(self.left_on) and _any(self.right_on):
            for lk, rk in zip(self.left_on, self.right_on):
                if is_lkey(lk):
                    left_keys.append(lk)
                    if is_rkey(rk):
                        right_keys.append(rk)
                        join_names.append(None)  # what to do?
                    else:
                        right_keys.append(right[rk].values)
                        join_names.append(rk)
                else:
                    if not is_rkey(rk):
                        right_keys.append(right[rk].values)
                        if lk == rk:
                            # avoid key upcast in corner case (length-0)
                            if len(left) > 0:
                                right_drop.append(rk)
                            else:
                                left_drop.append(lk)
                    else:
                        right_keys.append(rk)
                    left_keys.append(left[lk].values)
                    join_names.append(lk)
        elif _any(self.left_on):
            # only left keys given: right side joins on its index
            for k in self.left_on:
                if is_lkey(k):
                    left_keys.append(k)
                    join_names.append(None)
                else:
                    left_keys.append(left[k].values)
                    join_names.append(k)
            if isinstance(self.right.index, MultiIndex):
                right_keys = [lev.values.take(lab)
                              for lev, lab in zip(self.right.index.levels,
                                                  self.right.index.labels)]
            else:
                right_keys = [self.right.index.values]
        elif _any(self.right_on):
            # mirror case: left side joins on its index
            for k in self.right_on:
                if is_rkey(k):
                    right_keys.append(k)
                    join_names.append(None)
                else:
                    right_keys.append(right[k].values)
                    join_names.append(k)
            if isinstance(self.left.index, MultiIndex):
                left_keys = [lev.values.take(lab)
                             for lev, lab in zip(self.left.index.levels,
                                                 self.left.index.labels)]
            else:
                left_keys = [self.left.index.values]

        if left_drop:
            self.left = self.left.drop(left_drop, axis=1)

        if right_drop:
            self.right = self.right.drop(right_drop, axis=1)

        return left_keys, right_keys, join_names

    def _validate_specification(self):
        """Normalize/validate the on/left_on/right_on/index arguments."""
        # Hm, any way to make this logic less complicated??
        if (self.on is None and self.left_on is None
                and self.right_on is None):

            if self.left_index and self.right_index:
                self.left_on, self.right_on = (), ()
            elif self.left_index:
                if self.right_on is None:
                    raise MergeError('Must pass right_on or right_index=True')
            elif self.right_index:
                if self.left_on is None:
                    raise MergeError('Must pass left_on or left_index=True')
            else:
                # no keys given at all: fall back to the common columns,
                # which requires unique column labels on both sides
                if not self.left.columns.is_unique:
                    raise MergeError("Left data columns not unique: %s"
                                     % repr(self.left.columns))

                if not self.right.columns.is_unique:
                    raise MergeError("Right data columns not unique: %s"
                                     % repr(self.right.columns))

                # use the common columns
                common_cols = self.left.columns.intersection(
                    self.right.columns)
                if len(common_cols) == 0:
                    raise MergeError('No common columns to perform merge on')
                self.left_on = self.right_on = common_cols
        elif self.on is not None:
            if self.left_on is not None or self.right_on is not None:
                raise MergeError('Can only pass on OR left_on and '
                                 'right_on')
            self.left_on = self.right_on = self.on
        elif self.left_on is not None:
            n = len(self.left_on)
            if self.right_index:
                if len(self.left_on) != self.right.index.nlevels:
                    raise ValueError('len(left_on) must equal the number '
                                     'of levels in the index of "right"')
                self.right_on = [None] * n
        elif self.right_on is not None:
            n = len(self.right_on)
            if self.left_index:
                if len(self.right_on) != self.left.index.nlevels:
                    raise ValueError('len(right_on) must equal the number '
                                     'of levels in the index of "left"')
                self.left_on = [None] * n

        if len(self.right_on) != len(self.left_on):
            raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
    """
    Compute positional indexers aligning two lists of join-key arrays.

    Parameters
    ----------
    left_keys : list of arrays
        Join key values for the left frame, one array per key level.
    right_keys : list of arrays
        Join key values for the right frame; must have the same length
        (number of levels) as ``left_keys``.
    sort : bool, default False
        Sort the keys lexicographically in the result.
    how : {'inner', 'left', 'right', 'outer'}, default 'inner'
        Join type; selects the low-level join function.

    Returns
    -------
    left_indexer, right_indexer : arrays of row positions into the left and
        right frames (-1 marks a missing row).
    """
    from functools import partial

    assert len(left_keys) == len(right_keys), \
        'left_key and right_keys must be the same length'

    # bind `sort` arg. of _factorize_keys
    fkeys = partial(_factorize_keys, sort=sort)

    # get left & right join labels and num. of levels at each location
    llab, rlab, shape = map(list, zip( * map(fkeys, left_keys, right_keys)))

    # get flat i8 keys from label lists
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)

    # factorize keys to a dense i8 space
    # `count` is the num. of unique keys
    # set(lkey) | set(rkey) == range(count)
    lkey, rkey, count = fkeys(lkey, rkey)

    # preserve left frame order if how == 'left' and sort == False
    kwargs = {'sort': sort} if how == 'left' else {}
    join_func = _join_functions[how]
    return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
    """Outer merge for ordered data with optional forward-filling.

    Always performs an outer join with sorted keys; when
    ``fill_method='ffill'`` the row indexers are forward-filled so gaps
    are interpolated from the previous observation.
    """

    def __init__(self, left, right, on=None, by=None, left_on=None,
                 right_on=None, axis=1, left_index=False, right_index=False,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None):
        self.fill_method = fill_method

        _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
                                 right_on=right_on, axis=axis,
                                 left_index=left_index,
                                 right_index=right_index,
                                 how='outer', suffixes=suffixes,
                                 sort=True  # sorts when factorizing
                                 )

    def get_result(self):
        """Execute the ordered merge and return the joined frame."""
        join_index, left_indexer, right_indexer = self._get_join_info()

        # this is a bit kludgy
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)

        if self.fill_method == 'ffill':
            # forward-fill the -1 (missing) entries in the indexers
            left_join_indexer = algos.ffill_indexer(left_indexer)
            right_join_indexer = algos.ffill_indexer(right_indexer)
        else:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer

        lindexers = {1: left_join_indexer} if left_join_indexer is not None else {}
        rindexers = {1: right_join_indexer} if right_join_indexer is not None else {}

        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)

        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method='ordered_merge')

        # note: fills keys using the *unfilled* indexers
        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return result
def _get_multiindex_indexer(join_keys, index, sort):
    """Left-outer-join ``join_keys`` against a MultiIndex.

    Returns the (left_indexer, right_indexer) pair from the low-level
    left_outer_join, where the "right" side is the MultiIndex.
    """
    from functools import partial

    # bind `sort` argument
    fkeys = partial(_factorize_keys, sort=sort)

    # left & right join labels and num. of levels at each location
    rlab, llab, shape = map(list, zip( * map(fkeys, index.levels, join_keys)))
    if sort:
        rlab = list(map(np.take, rlab, index.labels))
    else:
        i8copy = lambda a: a.astype('i8', subok=False, copy=True)
        rlab = list(map(i8copy, index.labels))

    # fix right labels if there were any nulls
    for i in range(len(join_keys)):
        mask = index.labels[i] == -1
        if mask.any():
            # check if there already was any nulls at this location
            # if there was, it is factorized to `shape[i] - 1`
            a = join_keys[i][llab[i] == shape[i] - 1]
            if a.size == 0 or not a[0] != a[0]:
                # no NaN among the factorized keys: allocate a new code for
                # the index's nulls so they don't collide with a real value
                shape[i] += 1

            rlab[i][mask] = shape[i] - 1

    # get flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)

    # factorize keys to a dense i8 space
    lkey, rkey, count = fkeys(lkey, rkey)

    return algos.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
    """Left-outer-join a single key array against a (flat) index."""
    left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)

    left_indexer, right_indexer = \
        algos.left_outer_join(com._ensure_int64(left_key),
                              com._ensure_int64(right_key),
                              count, sort=sort)

    return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    """Left join ``join_keys`` (from the left frame) onto ``right_ax``.

    Returns (join_index, left_indexer, right_indexer); left_indexer is None
    when the left axis is preserved unchanged.
    """
    if len(join_keys) > 1:
        # multiple keys require the right axis to be a matching MultiIndex
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")

        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]

        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
    """Right outer join, implemented as a left outer join with the
    operands swapped (and the returned indexers swapped back)."""
    rindexer, lindexer = algos.left_outer_join(y, x, max_groups)
    return lindexer, rindexer
# Dispatch table from `how` to the low-level join implementation used by
# _get_join_indexers.
_join_functions = {
    'inner': algos.inner_join,
    'left': algos.left_outer_join,
    'right': _right_outer_join,
    'outer': algos.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
    """Factorize a pair of key arrays into shared integer codes.

    Both arrays are encoded with the SAME factorizer so equal values get
    equal codes across the two sides. Returns (llab, rlab, count) where
    count is the number of distinct codes (including one for NA, if any).
    """
    if com.is_int_or_datetime_dtype(lk) and com.is_int_or_datetime_dtype(rk):
        # fast path: factorize directly on int64 values
        klass = _hash.Int64Factorizer
        lk = com._ensure_int64(lk)
        rk = com._ensure_int64(rk)
    else:
        klass = _hash.Factorizer
        lk = com._ensure_object(lk)
        rk = com._ensure_object(rk)

    rizer = klass(max(len(lk), len(rk)))

    llab = rizer.factorize(lk)
    rlab = rizer.factorize(rk)

    count = rizer.get_count()

    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)

    # NA group: remap all -1 codes to a dedicated trailing group
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()

    if lany or rany:
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1

    return llab, rlab, count
def _sort_labels(uniques, left, right):
    """Remap ``left``/``right`` codes so they index ``uniques`` in sorted
    order; -1 (missing) codes are preserved."""
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values

    order = uniques.argsort()
    # inverse permutation: old code -> rank in sorted order
    inverse = np.empty(len(order), dtype=np.int64)
    inverse.put(order, np.arange(len(order)))

    remapped = []
    for labels in (left, right):
        new_labels = inverse.take(com._ensure_platform_int(labels))
        np.putmask(new_labels, labels == -1, -1)
        remapped.append(new_labels)

    return tuple(remapped)
def _get_join_keys(llab, rlab, shape, sort):
    """Collapse multi-level label lists into flat int64 join keys.

    Packs as many levels as fit without int64 overflow into a single key,
    densifies, and recurses on the remaining levels.
    """
    from pandas.core.groupby import _int64_overflow_possible

    # how many levels can be done without overflow
    pred = lambda i: not _int64_overflow_possible(shape[:i])
    nlev = next(filter(pred, range(len(shape), 0, -1)))

    # get keys for the first `nlev` levels
    stride = np.prod(shape[1:nlev], dtype='i8')
    lkey = stride * llab[0].astype('i8', subok=False, copy=False)
    rkey = stride * rlab[0].astype('i8', subok=False, copy=False)

    for i in range(1, nlev):
        stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride

    if nlev == len(shape):  # all done!
        return lkey, rkey

    # densify current keys to avoid overflow
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)

    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = [count] + shape[nlev:]

    # recurse with the densified first level plus the remaining levels
    return _get_join_keys(llab, rlab, shape, sort)
#----------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False, copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes. Can also add a layer of hierarchical indexing on the
    concatenation axis, which may be useful if the labels are the same (or
    overlapping) on the passed axis number

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently unless
        they are all None in which case a ValueError will be raised
    axis : {0, 1, ...}, default 0
        The axis to concatenate along
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis(es)
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of performing
        inner/outer set logic
    verify_integrity : boolean, default False
        Check whether the new concatenated axis contains duplicates. This can
        be very expensive relative to the actual data concatenation
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct
        hierarchical index using the passed keys as the outermost level
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys
    names : list, default None
        Names for the levels in the resulting hierarchical index
    ignore_index : boolean, default False
        If True, do not use the index values along the concatenation axis. The
        resulting axis will be labeled 0, ..., n - 1. This is useful if you are
        concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note that the index values on the
        other axes are still respected in the join.
    copy : boolean, default True
        If False, do not copy data unnecessarily

    Notes
    -----
    The keys, levels, and names arguments are all optional

    Returns
    -------
    concatenated : type of objects
    """
    # All validation and assembly is delegated to _Concatenator
    op = _Concatenator(objs, axis=axis, join_axes=join_axes,
                       ignore_index=ignore_index, join=join,
                       keys=keys, levels=levels, names=names,
                       verify_integrity=verify_integrity,
                       copy=copy)
    return op.get_result()
class _Concatenator(object):
    """
    Orchestrates a concatenation operation for BlockManagers.

    Validates and cleans the input objects in ``__init__``, determines the
    result dimensionality and new axes, and assembles the concatenated
    object in ``get_result``.
    """

    def __init__(self, objs, axis=0, join='outer', join_axes=None,
                 keys=None, levels=None, names=None,
                 ignore_index=False, verify_integrity=False, copy=True):
        if isinstance(objs, (NDFrame, compat.string_types)):
            raise TypeError('first argument must be an iterable of pandas '
                            'objects, you passed an object of type '
                            '"{0}"'.format(type(objs).__name__))

        if join == 'outer':
            self.intersect = False
        elif join == 'inner':
            self.intersect = True
        else:  # pragma: no cover
            raise ValueError('Only can inner (intersect) or outer (union) '
                             'join the other axis')

        if isinstance(objs, dict):
            if keys is None:
                keys = sorted(objs)
            objs = [objs[k] for k in keys]

        # drop None entries, keeping keys aligned with surviving objects
        if keys is None:
            objs = [obj for obj in objs if obj is not None]
        else:
            # #1649
            clean_keys = []
            clean_objs = []
            for k, v in zip(keys, objs):
                if v is None:
                    continue
                clean_keys.append(k)
                clean_objs.append(v)
            objs = clean_objs
            keys = clean_keys

        if len(objs) == 0:
            raise ValueError('All objects passed were None')

        # consolidate data & figure out what our result ndim is going to be
        ndims = set()
        for obj in objs:
            if not isinstance(obj, NDFrame):
                raise TypeError("cannot concatenate a non-NDFrame object")

            # consolidate
            obj.consolidate(inplace=True)
            ndims.add(obj.ndim)

        # get the sample object used to infer the result type/axes:
        # want the highest ndim that we have, and it must be non-empty
        # unless all objs are empty
        sample = None
        if len(ndims) > 1:
            max_ndim = max(ndims)
            for obj in objs:
                if obj.ndim == max_ndim and np.sum(obj.shape):
                    sample = obj
                    break
        else:
            # filter out the empties,
            # but only if we have no multi-index possibilities
            df = DataFrame([obj.shape for obj in objs]).sum(1)
            non_empties = df[df != 0]
            if len(non_empties) and (keys is None and names is None and
                                     levels is None and join_axes is None):
                objs = [objs[i] for i in non_empties.index]
                sample = objs[0]

        if sample is None:
            sample = objs[0]
        self.objs = objs

        # Need to flip BlockManager axis in the DataFrame special case
        self._is_frame = isinstance(sample, DataFrame)
        if self._is_frame:
            axis = 1 if axis == 0 else 0

        self._is_series = isinstance(sample, ABCSeries)
        if not 0 <= axis <= sample.ndim:
            raise AssertionError("axis must be between 0 and {0}, "
                                 "input was {1}".format(sample.ndim, axis))

        # if we have mixed ndims, then convert to highest ndim
        # creating column numbers as needed
        if len(ndims) > 1:
            current_column = 0
            max_ndim = sample.ndim
            self.objs, objs = [], self.objs
            for obj in objs:
                ndim = obj.ndim
                if ndim == max_ndim:
                    pass
                elif ndim != max_ndim - 1:
                    raise ValueError("cannot concatenate unaligned mixed "
                                     "dimensional NDFrame objects")
                else:
                    name = getattr(obj, 'name', None)
                    if ignore_index or name is None:
                        name = current_column
                        current_column += 1

                    # doing a row-wise concatenation so need everything
                    # to line up
                    if self._is_frame and axis == 1:
                        name = 0
                    obj = sample._constructor({name: obj})

                self.objs.append(obj)

        # note: this is the BlockManager axis (since DataFrame is transposed)
        self.axis = axis
        self.join_axes = join_axes
        self.keys = keys
        self.names = names
        self.levels = levels

        self.ignore_index = ignore_index
        self.verify_integrity = verify_integrity
        self.copy = copy

        self.new_axes = self._get_new_axes()

    def get_result(self):
        """Assemble and return the concatenated pandas object."""
        # series only
        if self._is_series:
            # stack blocks
            if self.axis == 0:
                new_data = com._concat_compat([x.values for x in self.objs])
                name = com._consensus_name_attr(self.objs)
                return (Series(new_data, index=self.new_axes[0], name=name)
                        .__finalize__(self, method='concat'))

            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                index, columns = self.new_axes
                tmpdf = DataFrame(data, index=index)
                if columns is not None:
                    tmpdf.columns = columns
                return tmpdf.__finalize__(self, method='concat')

        # combine block managers
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue

                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]

                mgrs_indexers.append((obj._data, indexers))

            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes, concat_axis=self.axis,
                copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()

            return (self.objs[0]._from_axes(new_data, self.new_axes)
                    .__finalize__(self, method='concat'))

    def _get_result_dim(self):
        # concatenating Series along axis 1 produces a 2-d result
        if self._is_series and self.axis == 1:
            return 2
        else:
            return self.objs[0].ndim

    def _get_new_axes(self):
        ndim = self._get_result_dim()
        new_axes = [None] * ndim

        if self.join_axes is None:
            for i in range(ndim):
                if i == self.axis:
                    continue
                new_axes[i] = self._get_comb_axis(i)
        else:
            if len(self.join_axes) != ndim - 1:
                # BUG FIX: the message previously read "must not be equal",
                # inverting the actual requirement enforced above.
                raise AssertionError("length of join_axes must be "
                                     "equal to {0}".format(ndim - 1))

            # ufff...
            indices = lrange(ndim)
            indices.remove(self.axis)

            for i, ax in zip(indices, self.join_axes):
                new_axes[i] = ax

        new_axes[self.axis] = self._get_concat_axis()
        return new_axes

    def _get_comb_axis(self, i):
        if self._is_series:
            all_indexes = [x.index for x in self.objs]
        else:
            try:
                all_indexes = [x._data.axes[i] for x in self.objs]
            except IndexError:
                types = [type(x).__name__ for x in self.objs]
                raise TypeError("Cannot concatenate list of %s" % types)

        return _get_combined_index(all_indexes, intersect=self.intersect)

    def _get_concat_axis(self):
        """
        Return index to be used along concatenation axis.
        """
        if self._is_series:
            if self.axis == 0:
                indexes = [x.index for x in self.objs]
            elif self.ignore_index:
                idx = Index(np.arange(len(self.objs)))
                idx.is_unique = True  # arange is always unique
                return idx
            elif self.keys is None:
                names = []
                for x in self.objs:
                    if not isinstance(x, Series):
                        raise TypeError("Cannot concatenate type 'Series' "
                                        "with object of type "
                                        "%r" % type(x).__name__)
                    if x.name is not None:
                        names.append(x.name)
                    else:
                        # any unnamed Series forces a default integer index
                        idx = Index(np.arange(len(self.objs)))
                        idx.is_unique = True
                        return idx
                return Index(names)
            else:
                return _ensure_index(self.keys)
        else:
            indexes = [x._data.axes[self.axis] for x in self.objs]

        if self.ignore_index:
            idx = Index(np.arange(sum(len(i) for i in indexes)))
            idx.is_unique = True
            return idx

        if self.keys is None:
            concat_axis = _concat_indexes(indexes)
        else:
            concat_axis = _make_concat_multiindex(indexes, self.keys,
                                                  self.levels, self.names)

        self._maybe_check_integrity(concat_axis)

        return concat_axis

    def _maybe_check_integrity(self, concat_index):
        # only pay the uniqueness-check cost when explicitly requested
        if self.verify_integrity:
            if not concat_index.is_unique:
                overlap = concat_index.get_duplicates()
                raise ValueError('Indexes have overlapping values: %s'
                                 % str(overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
    """
    Build the MultiIndex for the concatenation axis when ``keys`` are given:
    one outer level per key (or per tuple component), with the values of the
    original indexes nested below.
    """
    # normalize keys/levels/names into parallel per-level sequences
    if ((levels is None and isinstance(keys[0], tuple)) or
        (levels is not None and len(levels) > 1)):
        zipped = lzip(*keys)
        if names is None:
            names = [None] * len(zipped)
        if levels is None:
            levels = [Categorical.from_array(zp, ordered=True).categories for zp in zipped]
        else:
            levels = [_ensure_index(x) for x in levels]
    else:
        zipped = [keys]
        if names is None:
            names = [None]
        if levels is None:
            levels = [_ensure_index(keys)]
        else:
            levels = [_ensure_index(x) for x in levels]
    if not _all_indexes_same(indexes):
        label_list = []
        # things are potentially different sizes, so compute the exact labels
        # for each level and pass those to MultiIndex.from_arrays
        for hlevel, level in zip(zipped, levels):
            to_concat = []
            for key, index in zip(hlevel, indexes):
                try:
                    i = level.get_loc(key)
                except KeyError:
                    raise ValueError('Key %s not in level %s'
                                     % (str(key), str(level)))
                to_concat.append(np.repeat(i, len(index)))
            label_list.append(np.concatenate(to_concat))
        concat_index = _concat_indexes(indexes)
        # these go at the end
        if isinstance(concat_index, MultiIndex):
            levels.extend(concat_index.levels)
            label_list.extend(concat_index.labels)
        else:
            factor = Categorical.from_array(concat_index, ordered=True)
            levels.append(factor.categories)
            label_list.append(factor.codes)
        if len(names) == len(levels):
            names = list(names)
        else:
            # make sure that all of the passed indices have the same nlevels
            if not len(set([ i.nlevels for i in indexes ])) == 1:
                raise AssertionError("Cannot concat indices that do"
                                     " not have the same number of levels")
            # also copies
            names = names + _get_consensus_names(indexes)
        return MultiIndex(levels=levels, labels=label_list, names=names,
                          verify_integrity=False)
    # fast path: all indexes are identical, so labels tile/repeat regularly
    new_index = indexes[0]
    n = len(new_index)
    kpieces = len(indexes)
    # also copies
    new_names = list(names)
    new_levels = list(levels)
    # construct labels
    new_labels = []
    # do something a bit more speedy
    for hlevel, level in zip(zipped, levels):
        hlevel = _ensure_index(hlevel)
        mapped = level.get_indexer(hlevel)
        mask = mapped == -1
        if mask.any():
            raise ValueError('Values not found in passed level: %s'
                             % str(hlevel[mask]))
        new_labels.append(np.repeat(mapped, n))
    if isinstance(new_index, MultiIndex):
        new_levels.extend(new_index.levels)
        new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
    else:
        new_levels.append(new_index)
        new_labels.append(np.tile(np.arange(n), kpieces))
    if len(new_names) < len(new_levels):
        new_names.extend(new_index.names)
    return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
                      verify_integrity=False)
def _should_fill(lname, rname):
    """Fill unless both names are strings and they are equal strings."""
    both_strings = (isinstance(lname, compat.string_types) and
                    isinstance(rname, compat.string_types))
    if not both_strings:
        return True
    return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| gpl-2.0 |
Erotemic/utool | utool/util_graph.py | 1 | 85803 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import numpy as np
except ImportError:
pass
try:
import networkx as nx
except ImportError:
pass
import collections
import functools
from utool import util_inject
from utool import util_const
from six.moves import reduce, zip, range
import itertools as it
(print, rrr, profile) = util_inject.inject2(__name__)
def nx_topsort_nodes(graph, nodes):
    """Return ``nodes`` reordered to respect a topological sort of ``graph``."""
    import utool as ut
    ranks = ut.nx_topsort_rank(graph, nodes)
    order = ut.rebase_labels(ranks)
    return ut.take(nodes, order)
def nx_topsort_rank(graph, nodes=None):
    """
    Return the position of each node in ``nodes`` within a topological
    sort of ``graph``.

    graph = inputs.exi_graph.reverse()
    nodes = flat_node_order_

    Note: removed a dead ``if False:`` branch and an unused
    ``nx_dag_node_rank`` computation from the original implementation.
    """
    import utool as ut
    # Non-deterministic in that nx.topological_sort may break ties
    # differently between runs.
    topsort = list(nx.topological_sort(graph))
    node_to_top_rank = ut.make_index_lookup(topsort)
    toprank = ut.dict_take(node_to_top_rank, nodes)
    return toprank
def nx_common_descendants(graph, node1, node2):
    """Set of nodes reachable from both ``node1`` and ``node2``."""
    reach1 = nx.descendants(graph, node1)
    reach2 = nx.descendants(graph, node2)
    return reach1 & reach2
def nx_common_ancestors(graph, node1, node2):
    """Set of nodes from which both ``node1`` and ``node2`` are reachable."""
    anc1 = nx.ancestors(graph, node1)
    anc2 = nx.ancestors(graph, node2)
    return anc1 & anc2
def nx_make_adj_matrix(G):
    """Dense 0/1 adjacency matrix of ``G``, rows/cols in node order."""
    import utool as ut
    nodes = list(G.nodes())
    node2_idx = ut.make_index_lookup(nodes)
    uv_list = [ut.dict_take(node2_idx, edge) for edge in G.edges()]
    A = np.zeros((len(nodes), len(nodes)))
    A[tuple(np.array(uv_list).T)] = 1
    return A
def nx_transitive_reduction(G, mode=1):
    """
    Computes the transitive reduction of graph ``G``.

    References:
        https://en.wikipedia.org/wiki/Transitive_reduction#Computing_the_reduction_using_the_closure
        http://dept-info.labri.fr/~thibault/tmp/0201008.pdf
        http://stackoverflow.com/questions/17078696/transitive-reduction-of-directed-graph-in-python

    CommandLine:
        python -m utool.util_graph nx_transitive_reduction --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> G = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
        >>>                 ('a', 'd'), ('b', 'd'), ('c', 'e'),
        >>>                 ('d', 'e'), ('c', 'e'), ('c', 'd')])
        >>> G = testdata_graph()[1]
        >>> G_tr = nx_transitive_reduction(G, mode=1)
        >>> G_tr2 = nx_transitive_reduction(G, mode=1)
        >>> ut.quit_if_noshow()
        >>> try:
        >>>     import plottool_ibeis as pt
        >>> except ImportError:
        >>>     import plottool as pt
        >>> G_ = nx.dag.transitive_closure(G)
        >>> pt.show_nx(G    , pnum=(1, 5, 1), fnum=1)
        >>> pt.show_nx(G_tr , pnum=(1, 5, 2), fnum=1)
        >>> pt.show_nx(G_tr2 , pnum=(1, 5, 3), fnum=1)
        >>> pt.show_nx(G_   , pnum=(1, 5, 4), fnum=1)
        >>> pt.show_nx(nx.dag.transitive_closure(G_tr), pnum=(1, 5, 5), fnum=1)
        >>> ut.show_if_requested()
    """
    import utool as ut

    def _uncondense(G_cond, G_tr):
        # Map the reduction computed on the condensation back onto the
        # original nodes, re-adding one directed cycle per strongly
        # connected component. (Previously duplicated in both modes.)
        uncondensed = G_cond.__class__()
        mapping = G_cond.graph['mapping']
        uncondensed.add_nodes_from(mapping.keys())
        inv_mapping = ut.invert_dict(mapping, unique_vals=False)
        for u, v in G_tr.edges():
            uncondensed.add_edge(inv_mapping[u][0], inv_mapping[v][0])
        for key, path in inv_mapping.items():
            if len(path) > 1:
                directed_cycle = list(ut.itertwo(path, wrap=True))
                uncondensed.add_edges_from(directed_cycle)
        return uncondensed

    has_cycles = not nx.is_directed_acyclic_graph(G)
    if has_cycles:
        # FIXME: this does not work for cycle graphs.
        # Need to do algorithm on SCCs
        G_orig = G
        G = nx.condensation(G_orig)

    nodes = list(G.nodes())
    node2_idx = ut.make_index_lookup(nodes)

    # For each node u, perform DFS consider its set of (non-self) children C.
    # For each descendant v, of a node in C, remove any edge from u to v.
    if mode == 1:
        G_tr = G.copy()

        for parent in G_tr.nodes():
            # Remove self loops
            if G_tr.has_edge(parent, parent):
                G_tr.remove_edge(parent, parent)
            # For each child of the parent
            for child in list(G_tr.successors(parent)):
                # Preorder nodes includes its argument (no added complexity)
                for gchild in list(G_tr.successors(child)):
                    # Remove all edges from parent to non-child descendants
                    for descendant in nx.dfs_preorder_nodes(G_tr, gchild):
                        if G_tr.has_edge(parent, descendant):
                            G_tr.remove_edge(parent, descendant)

        if has_cycles:
            G_tr = _uncondense(G, G_tr)
    else:
        def make_adj_matrix(G):
            edges = list(G.edges())
            edge2_idx = ut.partial(ut.dict_take, node2_idx)
            uv_list = ut.lmap(edge2_idx, edges)
            A = np.zeros((len(nodes), len(nodes)))
            A[tuple(np.array(uv_list).T)] = 1
            return A

        G_ = nx.dag.transitive_closure(G)

        A = make_adj_matrix(G)
        B = make_adj_matrix(G_)

        # An edge (u, v) is redundant iff some successor of u also
        # reaches v in the closure.
        AB = A.dot(B)
        A_and_notAB = np.logical_and(A, np.logical_not(AB))
        tr_uvs = np.where(A_and_notAB)

        edges = list(zip(*ut.unflat_take(nodes, tr_uvs)))
        G_tr = G.__class__()
        G_tr.add_nodes_from(nodes)
        G_tr.add_edges_from(edges)

        if has_cycles:
            G_tr = _uncondense(G, G_tr)
    return G_tr
def nx_source_nodes(graph):
    """Yield every node of ``graph`` with in-degree zero (no parents)."""
    return (node for node in graph.nodes() if graph.in_degree(node) == 0)
def nx_sink_nodes(graph):
    """Yield every node of ``graph`` with out-degree zero (no children)."""
    return (node for node in graph.nodes() if graph.out_degree(node) == 0)
# def nx_sink_nodes(graph):
# topsort_iter = nx.dag.topological_sort(graph)
# sink_iter = (node for node in topsort_iter
# if graph.out_degree(node) == 0)
# return sink_iter
def nx_to_adj_dict(graph):
    """
    Return ``{node: [neighbors]}`` built from ``graph.adjacency()``.

    Uses collections.defaultdict (already imported at module level)
    instead of importing utool just for its ``ddict`` alias.
    """
    adj_dict = collections.defaultdict(list)
    for u, edges in graph.adjacency():
        adj_dict[u].extend(list(edges.keys()))
    return dict(adj_dict)
def nx_from_adj_dict(adj_dict, cls=None):
    """Build a graph of type ``cls`` (default nx.DiGraph) from an adjacency dict."""
    graph_cls = nx.DiGraph if cls is None else cls
    graph = graph_cls()
    graph.add_nodes_from(list(adj_dict.keys()))
    graph.add_edges_from([(u, v) for u, adj in adj_dict.items() for v in adj])
    return graph
def nx_dag_node_rank(graph, nodes=None):
    """
    Returns rank of nodes that define the "level" each node is on in a
    topological sort. This is the same as the Graphviz dot rank.
    Ignore:
        simple_graph = ut.simplify_graph(exi_graph)
        adj_dict = ut.nx_to_adj_dict(simple_graph)
        import plottool as pt
        pt.qt4ensure()
        pt.show_nx(graph)
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
        >>> nodes = [2, 1, 5]
        >>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
        >>> graph = f_graph.reverse()
        >>> #ranks = ut.nx_dag_node_rank(graph, nodes)
        >>> ranks = ut.nx_dag_node_rank(graph, nodes)
        >>> result = ('ranks = %r' % (ranks,))
        >>> print(result)
        ranks = [3, 2, 1]
    """
    import utool as ut
    # assumes the graph has exactly one source node -- TODO confirm
    source = list(ut.nx_source_nodes(graph))[0]
    # rank of a node = length of the longest path from the source to it;
    # dag_longest_path is defined elsewhere in this module
    longest_paths = dict([(target, dag_longest_path(graph, source, target))
                          for target in graph.nodes()])
    node_to_rank = ut.map_dict_vals(len, longest_paths)
    if nodes is None:
        # no subset requested: return the full node -> rank mapping
        return node_to_rank
    else:
        ranks = ut.dict_take(node_to_rank, nodes)
        return ranks
def nx_all_nodes_between(graph, source, target, data=False):
    """
    Find all nodes with on paths between source and target.

    NOTE(review): the ``data`` parameter is accepted but never used here.
    """
    import utool as ut
    if source is None:
        # assume there is a single source
        candidates = list(ut.nx_source_nodes(graph))
        assert len(candidates) == 1, (
            'specify source if there is not only one')
        source = candidates[0]
    if target is None:
        # assume there is a single sink
        candidates = list(ut.nx_sink_nodes(graph))
        assert len(candidates) == 1, (
            'specify sink if there is not only one')
        target = candidates[0]
    # union of the node sets of every simple path
    path_sets = map(set, nx.all_simple_paths(graph, source, target))
    nodes = sorted(set.union(*path_sets))
    return nodes
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False,
                             data=False):
    """
    Returns each path from source to target as a list of edges.
    This function is meant to be used with MultiGraphs or MultiDiGraphs.
    When ``keys`` is True each edge in the path is returned with its unique key
    identifier. In this case it is possible to distinguish between different
    paths along different edges between the same two nodes.
    Derived from simple_paths.py in networkx
    """
    if cutoff is None:
        cutoff = len(G) - 1
    if cutoff < 1:
        return
    import utool as ut
    import six
    # Iterative DFS: visited_nodes/visited_edges record the current path,
    # edge_stack holds an iterator of outgoing edges per path node.
    visited_nodes = [source]
    visited_edges = []
    if G.is_multigraph():
        get_neighbs = ut.partial(G.edges, keys=keys, data=data)
    else:
        get_neighbs = ut.partial(G.edges, data=data)
    edge_stack = [iter(get_neighbs(source))]
    while edge_stack:
        children_edges = edge_stack[-1]
        child_edge = six.next(children_edges, None)
        if child_edge is None:
            # exhausted this node's edges: backtrack one level
            edge_stack.pop()
            visited_nodes.pop()
            if len(visited_edges) > 0:
                visited_edges.pop()
        elif len(visited_nodes) < cutoff:
            child_node = child_edge[1]
            if child_node == target:
                yield visited_edges + [child_edge]
            elif child_node not in visited_nodes:
                # extend the current path (simple paths only)
                visited_nodes.append(child_node)
                visited_edges.append(child_edge)
                edge_stack.append(iter(get_neighbs(child_node)))
        else:
            # at the cutoff depth: only edges landing on target can finish
            for edge in [child_edge] + list(children_edges):
                if edge[1] == target:
                    yield visited_edges + [edge]
            edge_stack.pop()
            visited_nodes.pop()
            if len(visited_edges) > 0:
                visited_edges.pop()
def nx_edges_between(graph, nodes1, nodes2=None, assume_disjoint=False,
                     assume_sparse=True):
    r"""
    Get edges between two components or within a single component

    Args:
        graph (nx.Graph): the graph
        nodes1 (set): list of nodes
        nodes2 (set): (default=None) if None it is equivlanet to nodes2=nodes1
        assume_disjoint (bool): skips expensive check to ensure edges arnt
            returned twice (default=False)

    CommandLine:
        python -m utool.util_graph --test-nx_edges_between

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> edges = [
        >>>     (1, 2), (2, 3), (3, 4), (4, 1), (4, 3),  # cc 1234
        >>>     (1, 5), (7, 2), (5, 1),  # cc 567 / 5678
        >>>     (7, 5), (5, 6), (8, 7),
        >>> ]
        >>> digraph = nx.DiGraph(edges)
        >>> graph = nx.Graph(edges)
        >>> nodes1 = [1, 2, 3, 4]
        >>> nodes2 = [5, 6, 7]
        >>> n2 = sorted(nx_edges_between(graph, nodes1, nodes2))
        >>> n4 = sorted(nx_edges_between(graph, nodes1))
        >>> n5 = sorted(nx_edges_between(graph, nodes1, nodes1))
        >>> n1 = sorted(nx_edges_between(digraph, nodes1, nodes2))
        >>> n3 = sorted(nx_edges_between(digraph, nodes1))
        >>> print('n2 == %r' % (n2,))
        >>> print('n4 == %r' % (n4,))
        >>> print('n5 == %r' % (n5,))
        >>> print('n1 == %r' % (n1,))
        >>> print('n3 == %r' % (n3,))
        >>> assert n2 == ([(1, 5), (2, 7)]), '2'
        >>> assert n4 == ([(1, 2), (1, 4), (2, 3), (3, 4)]), '4'
        >>> assert n5 == ([(1, 2), (1, 4), (2, 3), (3, 4)]), '5'
        >>> assert n1 == ([(1, 5), (5, 1), (7, 2)]), '1'
        >>> assert n3 == ([(1, 2), (2, 3), (3, 4), (4, 1), (4, 3)]), '3'
        >>> n6 = sorted(nx_edges_between(digraph, nodes1 + [6], nodes2 + [1, 2], assume_sparse=True))
        >>> print('n6 = %r' % (n6,))
        >>> n6 = sorted(nx_edges_between(digraph, nodes1 + [6], nodes2 + [1, 2], assume_sparse=False))
        >>> print('n6 = %r' % (n6,))
        >>> assert n6 == ([(1, 2), (1, 5), (2, 3), (4, 1), (5, 1), (5, 6), (7, 2)]), '6'

    Timeit:
        from utool.util_graph import *  # NOQA
        # ut.timeit_compare()
        import networkx as nx
        import utool as ut
        graph = nx.fast_gnp_random_graph(1000, .001)
        list(nx.connected_components(graph))
        rng = np.random.RandomState(0)
        nodes1 = set(rng.choice(list(graph.nodes()), 500, replace=False))
        nodes2 = set(graph.nodes()) - nodes1
        edges_between = ut.nx_edges_between
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=True))
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=False))
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=False))
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=True))
        graph = nx.fast_gnp_random_graph(1000, .1)
        rng = np.random.RandomState(0)
        print(graph.number_of_edges())
        nodes1 = set(rng.choice(list(graph.nodes()), 500, replace=False))
        nodes2 = set(graph.nodes()) - nodes1
        edges_between = ut.nx_edges_between
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=True))
        %timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=True))

    Ignore:
        graph = nx.DiGraph(edges)
        graph = nx.Graph(edges)
        nodes1 = [1, 2, 3, 4]
        nodes2 = nodes1
    """
    if assume_sparse:
        # Method 1 is where we check the intersection of existing edges
        # and the edges in the second set (faster for sparse graphs)
        # helpers nx_edges between
        def _node_combo_lower(graph, both):
            both_lower = set([])
            for u in both:
                neighbs = set(graph.adj[u])
                neighbsBB_lower = neighbs.intersection(both_lower)
                for v in neighbsBB_lower:
                    yield (u, v)
                both_lower.add(u)

        def _node_combo_upper(graph, both):
            both_upper = both.copy()
            for u in both:
                neighbs = set(graph.adj[u])
                neighbsBB_upper = neighbs.intersection(both_upper)
                for v in neighbsBB_upper:
                    yield (u, v)
                both_upper.remove(u)

        def _node_product(graph, only1, only2):
            for u in only1:
                neighbs = set(graph.adj[u])
                neighbs12 = neighbs.intersection(only2)
                for v in neighbs12:
                    yield (u, v)

        # Test for special cases
        if nodes2 is None or nodes2 is nodes1:
            # Case where we just are finding internal edges
            both = set(nodes1)
            if graph.is_directed():
                edge_sets = (
                    _node_combo_upper(graph, both),  # B-to-B (upper)
                    _node_combo_lower(graph, both),  # B-to-B (lower)
                )
            else:
                edge_sets = (
                    _node_combo_upper(graph, both),  # B-to-B (upper)
                )
        elif assume_disjoint:
            # Case where we find edges between disjoint sets
            only1 = set(nodes1)
            only2 = set(nodes2)
            if graph.is_directed():
                edge_sets = (
                    _node_product(graph, only1, only2),  # 1-to-2
                    _node_product(graph, only2, only1),  # 2-to-1
                )
            else:
                edge_sets = (
                    _node_product(graph, only1, only2),  # 1-to-2
                )
        else:
            # Full general case
            nodes1_ = set(nodes1)
            if nodes2 is None:
                nodes2_ = nodes1_
            else:
                nodes2_ = set(nodes2)
            both = nodes1_.intersection(nodes2_)
            only1 = nodes1_ - both
            only2 = nodes2_ - both

            # This could be made faster by avoiding duplicate
            # calls to set(graph.adj[u]) in the helper functions
            if graph.is_directed():
                edge_sets = (
                    _node_product(graph, only1, only2),  # 1-to-2
                    _node_product(graph, only1, both),   # 1-to-B
                    _node_combo_upper(graph, both),      # B-to-B (u)
                    _node_combo_lower(graph, both),      # B-to-B (l)
                    _node_product(graph, both, only1),   # B-to-1
                    _node_product(graph, both, only2),   # B-to-2
                    _node_product(graph, only2, both),   # 2-to-B
                    _node_product(graph, only2, only1),  # 2-to-1
                )
            else:
                edge_sets = (
                    _node_product(graph, only1, only2),  # 1-to-2
                    _node_product(graph, only1, both),   # 1-to-B
                    _node_combo_upper(graph, both),      # B-to-B (u)
                    _node_product(graph, only2, both),   # 2-to-B
                )

        for u, v in it.chain.from_iterable(edge_sets):
            yield u, v
    else:
        # Method 2 is where we enumerate all possible edges and just take the
        # ones that exist (faster for very dense graphs)
        if nodes2 is None or nodes2 is nodes1:
            edge_iter = it.combinations(nodes1, 2)
        else:
            if assume_disjoint:
                # We assume len(isect(nodes1, nodes2)) == 0
                edge_iter = it.product(nodes1, nodes2)
            else:
                # make sure a single edge is not returned twice
                # in the case where len(isect(nodes1, nodes2)) > 0
                nodes1_ = set(nodes1)
                nodes2_ = set(nodes2)
                nodes_isect = nodes1_.intersection(nodes2_)
                nodes_only1 = nodes1_ - nodes_isect
                nodes_only2 = nodes2_ - nodes_isect
                edge_sets = [it.product(nodes_only1, nodes_only2),
                             it.product(nodes_only1, nodes_isect),
                             it.product(nodes_only2, nodes_isect),
                             it.combinations(nodes_isect, 2)]
                edge_iter = it.chain.from_iterable(edge_sets)

        if graph.is_directed():
            for n1, n2 in edge_iter:
                if graph.has_edge(n1, n2):
                    yield n1, n2
                if graph.has_edge(n2, n1):
                    yield n2, n1
        else:
            for n1, n2 in edge_iter:
                if graph.has_edge(n1, n2):
                    yield n1, n2
def nx_delete_node_attr(graph, name, nodes=None):
    """
    Removes node attributes

    Returns the number of attributes removed. ``name`` may be a single
    attribute name or a list/tuple of names (matching the behavior of
    nx_delete_edge_attr; the original only special-cased ``list``).

    Doctest:
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> G = nx.karate_club_graph()
        >>> nx.set_node_attributes(G, name='foo', values='bar')
        >>> datas = nx.get_node_attributes(G, 'club')
        >>> assert len(nx.get_node_attributes(G, 'club')) == 34
        >>> assert len(nx.get_node_attributes(G, 'foo')) == 34
        >>> ut.nx_delete_node_attr(G, ['club', 'foo'], nodes=[1, 2])
        >>> assert len(nx.get_node_attributes(G, 'club')) == 32
        >>> assert len(nx.get_node_attributes(G, 'foo')) == 32
        >>> ut.nx_delete_node_attr(G, ['club'])
        >>> assert len(nx.get_node_attributes(G, 'club')) == 0
        >>> assert len(nx.get_node_attributes(G, 'foo')) == 32
    """
    if nodes is None:
        nodes = list(graph.nodes())
    removed = 0
    # normalize to a list of names; de-duplicates the two loop bodies
    # the original had for the single-name and list cases
    names = list(name) if isinstance(name, (list, tuple)) else [name]
    node_dict = nx_node_dict(graph)
    for node in nodes:
        for name_ in names:
            try:
                del node_dict[node][name_]
                removed += 1
            except KeyError:
                pass
    return removed
@profile
def nx_delete_edge_attr(graph, name, edges=None):
    """
    Removes an attributes from specific edges in the graph

    Doctest:
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> G = nx.karate_club_graph()
        >>> nx.set_edge_attributes(G, name='spam', values='eggs')
        >>> nx.set_edge_attributes(G, name='foo', values='bar')
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 78
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 78
        >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2)])
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 77
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77
        >>> ut.nx_delete_edge_attr(G, ['spam'])
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77

    Doctest:
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> G = nx.MultiGraph()
        >>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (4, 5), (1, 2)])
        >>> nx.set_edge_attributes(G, name='spam', values='eggs')
        >>> nx.set_edge_attributes(G, name='foo', values='bar')
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 6
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 6
        >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2, 0)])
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 5
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5
        >>> ut.nx_delete_edge_attr(G, ['spam'])
        >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0
        >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5
    """
    removed = 0
    keys = [name] if not isinstance(name, (list, tuple)) else name
    is_multi = graph.is_multigraph()
    if edges is None:
        # multigraph edges carry a key to disambiguate parallel edges
        edges = graph.edges(keys=True) if is_multi else graph.edges()
    for edge in edges:
        if is_multi:
            u, v, k = edge
            data = graph[u][v][k]
        else:
            u, v = edge
            data = graph[u][v]
        for key_ in keys:
            try:
                del data[key_]
                removed += 1
            except KeyError:
                pass
    return removed
def nx_delete_None_edge_attr(graph, edges=None):
    """
    Delete every edge attribute whose value is None.

    Args:
        graph (nx.Graph): graph to mutate in place
        edges (list): restrict deletion to these edges; defaults to all
            edges (with keys for multigraphs).

    Returns:
        int: number of attributes removed
    """
    removed = 0
    if graph.is_multigraph():
        if edges is None:
            edges = list(graph.edges(keys=True))
        for u, v, k in edges:
            data = graph[u][v][k]
            for key in list(data.keys()):
                if data[key] is None:
                    del data[key]
                    removed += 1
    else:
        if edges is None:
            edges = list(graph.edges())
        # BUG FIX: this branch previously iterated over graph.edges(),
        # silently ignoring a caller-supplied ``edges`` subset.
        for u, v in edges:
            data = graph[u][v]
            for key in list(data.keys()):
                if data[key] is None:
                    del data[key]
                    removed += 1
    return removed
def nx_delete_None_node_attr(graph, nodes=None):
    """
    Delete every node attribute whose value is None.

    Args:
        graph (nx.Graph): graph to mutate in place
        nodes (list): restrict deletion to these nodes (default: all nodes)

    Returns:
        int: number of attributes removed
    """
    removed = 0
    if nodes is None:
        nodes = list(graph.nodes())
    # hoisted out of the loop; the original re-fetched the node dict
    # once per node
    node_dict = nx_node_dict(graph)
    # BUG FIX: previously iterated over graph.nodes(), silently ignoring
    # a caller-supplied ``nodes`` subset.
    for node in nodes:
        data = node_dict[node]
        for key in list(data.keys()):
            if data[key] is None:
                del data[key]
                removed += 1
    return removed
def nx_set_default_node_attributes(graph, key, val):
    """Set ``key`` on every node that does not already define it."""
    missing = [n for n, d in graph.nodes(data=True) if key not in d]
    if isinstance(val, dict):
        # per-node defaults: only nodes present in the dict are set
        values = {n: val[n] for n in missing if n in val}
    else:
        values = dict.fromkeys(missing, val)
    nx.set_node_attributes(graph, name=key, values=values)
def nx_set_default_edge_attributes(graph, key, val):
    """Sets ``key`` to ``val`` on every edge that does not yet define it.

    If ``val`` is a dict it is treated as an edge -> value mapping and only
    the unset edges present in the mapping are filled in.
    """
    missing = [(u, v) for u, v, attrs in graph.edges(data=True) if key not in attrs]
    if isinstance(val, dict):
        fill = {edge: val[edge] for edge in missing if edge in val}
    else:
        fill = dict.fromkeys(missing, val)
    nx.set_edge_attributes(graph, name=key, values=fill)
def nx_get_default_edge_attributes(graph, key, default=None):
    """Like nx.get_edge_attributes, but every edge appears in the result,
    with ``default`` used for edges that lack the attribute."""
    import utool as ut
    partial_attrs = nx.get_edge_attributes(graph, key)
    return ut.dict_subset(partial_attrs, list(graph.edges()), default=default)
def nx_get_default_node_attributes(graph, key, default=None):
    """Like nx.get_node_attributes, but every node appears in the result,
    with ``default`` used for nodes that lack the attribute."""
    import utool as ut
    partial_attrs = nx.get_node_attributes(graph, key)
    return ut.dict_subset(partial_attrs, list(graph.nodes()), default=default)
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam):
    """
    Generates the ``key`` attribute value for each node in ``nodes``.

    Without a ``default``, a missing attribute raises KeyError when the
    generator is consumed.
    """
    node_dict = nx_node_dict(G)
    if default is util_const.NoParam:
        return (node_dict[node][key] for node in nodes)
    return (node_dict[node].get(key, default) for node in nodes)
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam,
                      on_missing='error', on_keyerr='default'):
    """
    Improved generator version of nx.get_node_attributes

    Args:
        G (nx.Graph): graph to read attributes from
        key (str): node attribute name
        nodes (list): nodes of interest; defaults to all nodes in ``G``
        default: value used by the 'default' strategies
        on_missing (str): Strategy for handling nodes missing from G.
            Can be {'error', 'default', 'filter'}. defaults to 'error'.
        on_keyerr (str): Strategy for handling keys missing from node dicts.
            Can be {'error', 'default', 'filter'}. defaults to 'default'
            if default is specified, otherwise defaults to 'error'.

    Returns:
        generator: (node, attribute value) pairs

    Notes:
        strategies are:
            error - raises an error if key or node does not exist
            default - returns node, but uses value specified by default
            filter - skips the node
    """
    if on_missing is None:
        on_missing = 'error'
    # With no default value available, 'default' key handling is impossible
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    if nodes is None:
        nodes = G.nodes()
    node_dict = nx_node_dict(G)
    # Step 1: pair each requested node with its data dictionary
    if on_missing == 'error':
        node_data = ((node, node_dict[node]) for node in nodes)
    elif on_missing == 'filter':
        node_data = ((node, node_dict[node]) for node in nodes if node in G)
    elif on_missing == 'default':
        node_data = ((node, node_dict.get(node, {})) for node in nodes)
    else:
        raise KeyError('on_missing={} must be error, filter or default'.format(
            on_missing))
    # Step 2: extract the requested attribute from each data dictionary
    if on_keyerr == 'error':
        return ((node, data[key]) for node, data in node_data)
    elif on_keyerr == 'filter':
        return ((node, data[key]) for node, data in node_data if key in data)
    elif on_keyerr == 'default':
        return ((node, data.get(key, default)) for node, data in node_data)
    raise KeyError('on_keyerr={} must be error filter or default'.format(on_keyerr))
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
                       on_missing='error', on_keyerr='default'):
    """
    Generates attributes values of specific edges

    Args:
        G (nx.Graph): graph to read attributes from
        key (str): edge attribute name
        edges (list): (u, v) pairs of interest; defaults to all edges
        default: value yielded when the attribute is absent and
            ``on_keyerr='default'``
        on_missing (str): Strategy for handling edges missing from G.
            Can be {'error', 'default'}. defaults to 'error'.
        on_keyerr (str): Strategy for handling keys missing from edge dicts.
            Can be {'error', 'default'}. defaults to 'default'
            if default is specified, otherwise defaults to 'error'.

    Returns:
        generator: attribute value per requested edge
    """
    if edges is None:
        edges = G.edges()
    if on_missing is None:
        on_missing = 'error'
    if on_keyerr is None:
        on_keyerr = 'default'
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    # Generate `data_iter` edges and data dictionary
    if on_missing == 'error':
        data_iter = (G.adj[u][v] for u, v in edges)
    elif on_missing == 'default':
        data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
                     for u, v in edges)
    else:
        # FIX: the message previously advertised an unimplemented 'filter'
        # strategy; only 'error' and 'default' are handled here.
        raise KeyError('on_missing={} must be error or default'.format(
            on_missing))
    # Get `value_iter` desired value out of dictionary
    if on_keyerr == 'error':
        value_iter = (d[key] for d in data_iter)
    elif on_keyerr == 'default':
        value_iter = (d.get(key, default) for d in data_iter)
    else:
        raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
    return value_iter
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam,
                      on_missing='error', on_keyerr='default'):
    """
    Improved generator version of nx.get_edge_attributes

    Args:
        G (nx.Graph): graph to read attributes from
        key (str): edge attribute name
        edges (list): (u, v) pairs of interest; defaults to all edges
        default: value used by the 'default' strategies
        on_missing (str): Strategy for handling nodes missing from G.
            Can be {'error', 'default', 'filter'}. defaults to 'error'.
            is on_missing is not error, then we allow any edge even if the
            endpoints are not in the graph.
        on_keyerr (str): Strategy for handling keys missing from node dicts.
            Can be {'error', 'default', 'filter'}. defaults to 'default'
            if default is specified, otherwise defaults to 'error'.

    Returns:
        generator: ((u, v), attribute value) pairs
    """
    if on_missing is None:
        on_missing = 'error'
    # Without a default value, 'default' key handling is impossible
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    if edges is None:
        if G.is_multigraph():
            raise NotImplementedError('')
        edges = G.edges()
    # Step 1: pair each edge with its data dictionary
    if on_missing == 'error':
        edge_data = (((u, v), G.adj[u][v]) for u, v in edges)
    elif on_missing == 'filter':
        edge_data = (((u, v), G.adj[u][v]) for u, v in edges if G.has_edge(u, v))
    elif on_missing == 'default':
        edge_data = (((u, v), G.adj[u][v])
                     if G.has_edge(u, v) else ((u, v), {})
                     for u, v in edges)
    else:
        raise KeyError('on_missing={}'.format(on_missing))
    # Step 2: extract the requested attribute from each data dictionary
    if on_keyerr == 'error':
        return ((edge, data[key]) for edge, data in edge_data)
    elif on_keyerr == 'filter':
        return ((edge, data[key]) for edge, data in edge_data if key in data)
    elif on_keyerr == 'default':
        return ((edge, data.get(key, default)) for edge, data in edge_data)
    raise KeyError('on_keyerr={}'.format(on_keyerr))
def nx_from_node_edge(nodes=None, edges=None):
    """Builds an undirected nx.Graph from optional node and edge lists."""
    new_graph = nx.Graph()
    if nodes:
        new_graph.add_nodes_from(nodes)
    if edges:
        new_graph.add_edges_from(edges)
    return new_graph
def nx_minimum_weight_component(graph, weight='weight'):
    """ A minimum weight component is an MST + all negative edges """
    mwc = nx.minimum_spanning_tree(graph, weight=weight)
    # Adding an edge with negative weight can only lower the total weight
    weighted_edges = nx_gen_edge_attrs(graph, weight)
    mwc.add_edges_from(edge for edge, w in weighted_edges if w < 0)
    return mwc
def nx_from_matrix(weight_matrix, nodes=None, remove_self=True):
    """
    Builds an undirected nx.Graph from a square weight matrix.

    Edges are created for every entry that is not close to zero; each edge
    receives a 'weight' attribute and a 2-decimal string 'label' attribute.

    Args:
        weight_matrix (ndarray): pairwise edge weights
            (assumes rows/cols index the same node set -- TODO confirm)
        nodes (list): labels for the matrix rows/cols;
            defaults to range(len(weight_matrix))
        remove_self (bool): if True, drop self-loop edges (default = True)

    Returns:
        nx.Graph
    """
    import utool as ut
    import numpy as np
    if nodes is None:
        nodes = list(range(len(weight_matrix)))
    weight_list = weight_matrix.ravel()
    # Map flat indices back to (row, col) pairs
    flat_idxs_ = np.arange(weight_matrix.size)
    multi_idxs_ = np.unravel_index(flat_idxs_, weight_matrix.shape)
    # Remove 0 weight edges
    flags = np.logical_not(np.isclose(weight_list, 0))
    weight_list = ut.compress(weight_list, flags)
    multi_idxs = ut.compress(list(zip(*multi_idxs_)), flags)
    edge_list = ut.lmap(tuple, ut.unflat_take(nodes, multi_idxs))
    if remove_self:
        # Self loops are edges whose two endpoints coincide
        flags = [e1 != e2 for e1, e2 in edge_list]
        edge_list = ut.compress(edge_list, flags)
        weight_list = ut.compress(weight_list, flags)
    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edge_list)
    # Human-readable weight labels (e.g. for graphviz rendering)
    label_list = ['%.2f' % w for w in weight_list]
    nx.set_edge_attributes(graph, name='weight', values=dict(zip(edge_list, weight_list)))
    nx.set_edge_attributes(graph, name='label', values=dict(zip(edge_list, label_list)))
    return graph
def nx_ensure_agraph_color(graph):
    """ changes colors to hex strings on graph attrs

    Mutates every node and edge 'color' attribute in place into an
    agraph-compatible '#RRGGBB' or '#RRGGBBAA' hex string, folding an
    optional 'alpha' attribute into the alpha channel.
    """
    # plottool supplies the color conversion helpers; prefer the _ibeis fork
    try:
        from plottool_ibeis import color_funcs
        import plottool_ibeis as pt
    except ImportError:
        from plottool import color_funcs
        import plottool as pt
    #import six
    def _fix_agraph_color(data):
        # Normalizes data['color'] (any plottool-understood format) to hex.
        # An 'alpha' with no 'color' is treated as transparent black.
        try:
            orig_color = data.get('color', None)
            alpha = data.get('alpha', None)
            color = orig_color
            if color is None and alpha is not None:
                color = [0, 0, 0]
            if color is not None:
                color = pt.ensure_nonhex_color(color)
                #if isinstance(color, np.ndarray):
                #    color = color.tolist()
                color = list(color_funcs.ensure_base255(color))
                if alpha is not None:
                    if len(color) == 3:
                        color += [int(alpha * 255)]
                    else:
                        # overwrite an existing alpha channel
                        color[3] = int(alpha * 255)
                color = tuple(color)
                if len(color) == 3:
                    data['color'] = '#%02x%02x%02x' % color
                else:
                    data['color'] = '#%02x%02x%02x%02x' % color
        except Exception as ex:
            # keep the offending values visible before re-raising
            import utool as ut
            ut.printex(ex, keys=['color', 'orig_color', 'data'])
            raise
    for node, node_data in graph.nodes(data=True):
        data = node_data
        _fix_agraph_color(data)
    for u, v, edge_data in graph.edges(data=True):
        data = edge_data
        _fix_agraph_color(data)
def nx_edges(graph, keys=False, data=False):
    """Edge accessor that only forwards ``keys`` for multigraphs, where the
    argument is valid."""
    if graph.is_multigraph():
        return graph.edges(keys=keys, data=data)
    return graph.edges(data=data)
def dag_longest_path(graph, source, target):
    """
    Finds the longest path in a dag between two nodes

    Returns the empty list when no path exists.
    """
    if source == target:
        return [source]
    # ties are broken by first occurrence, matching a manual scan
    return max(nx.all_simple_paths(graph, source, target),
               key=len, default=[])
def testdata_graph():
    r"""
    Builds a small hard-coded test graph.

    Returns:
        tuple: (graph, G) -- the adjacency-list dict and the nx.DiGraph
            built from it
    CommandLine:
        python -m utool.util_graph --exec-testdata_graph --show
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> (graph, G) = testdata_graph()
        >>> import plottool as pt
        >>> ut.ensureqt()
        >>> pt.show_nx(G, layout='agraph')
        >>> ut.show_if_requested()
    """
    import utool as ut
    # Define adjacency list
    # NOTE: ``graph`` is deliberately reassigned several times below; each
    # assignment is an alternative fixture and only the final one is used.
    graph = {
        'a': ['b'],
        'b': ['c', 'f', 'e'],
        'c': ['g', 'd'],
        'd': ['c', 'h'],
        'e': ['a', 'f'],
        'f': ['g'],
        'g': ['f'],
        'h': ['g', 'd'],
        'i': ['j'],
        'j': [],
    }
    graph = {
        'a': ['b'],
        'b': ['c'],
        'c': ['d'],
        'd': ['a', 'e'],
        'e': ['c'],
    }
    #graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a']}
    #graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a']}
    graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a'], 'f': ['c']}
    #graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['b']}
    graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': []}  # double pair in non-scc
    graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']}  # double pair in non-scc
    #graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'f'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']}  # double pair in non-scc
    #graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e'], 'd': ['e']}  # double pair in non-scc
    graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e', 'b'], 'd': ['e']}  # double pair in non-scc
    # Extract G = (V, E)
    nodes = list(graph.keys())
    edges = ut.flatten([[(v1, v2) for v2 in v2s] for v1, v2s in graph.items()])
    G = nx.DiGraph()
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)
    # Disabled variant: the same fixture with node 'e' removed
    if False:
        G.remove_node('e')
        del graph['e']
        for val in graph.values():
            try:
                val.remove('e')
            except ValueError:
                pass
    return graph, G
def dict_depth(dict_, accum=0):
    """
    Returns the maximum nesting depth of a dictionary.

    Args:
        dict_: object to measure; non-dicts count as leaves
        accum (int): depth accumulated so far (recursion internal)

    Returns:
        int: nesting depth (0 for a non-dict or empty top-level input)

    FIX: an empty dict previously raised ValueError via ``max([])``;
    it is now treated as a leaf.
    """
    if not isinstance(dict_, dict) or not dict_:
        return accum
    return max(dict_depth(val, accum + 1)
               for val in dict_.values())
def edges_to_adjacency_list(edges):
    """Groups the parent of each (child, parent) edge under its child."""
    import utool as ut
    children_, parents_ = list(zip(*edges))
    parent_to_children = ut.group_items(parents_, children_)
    return parent_to_children
def paths_to_root(tablename, root, child_to_parents):
    """
    Recursively builds the tree of ancestor paths from ``tablename`` up to
    ``root``.

    Args:
        tablename (str): node to start from
        root (str): node that terminates every path (mapped to None)
        child_to_parents (dict): maps each node to its list of parents

    Returns:
        dict or None: nested dict of parents, or None once ``root`` is reached

    CommandLine:
        python -m utool.util_graph --exec-paths_to_root:0
        python -m utool.util_graph --exec-paths_to_root:1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> child_to_parents = {
        >>>     'chip': ['dummy_annot'],
        >>>     'chipmask': ['dummy_annot'],
        >>>     'descriptor': ['keypoint'],
        >>>     'fgweight': ['keypoint', 'probchip'],
        >>>     'keypoint': ['chip'],
        >>>     'notch': ['dummy_annot'],
        >>>     'probchip': ['dummy_annot'],
        >>>     'spam': ['fgweight', 'chip', 'keypoint']
        >>> }
        >>> to_root = paths_to_root('fgweight', 'dummy_annot', child_to_parents)
        >>> print(ut.repr3(to_root))
    """
    if tablename == root:
        return None
    return {parent: paths_to_root(parent, root, child_to_parents)
            for parent in child_to_parents[tablename]}
def get_allkeys(dict_):
    """Recursively collects every key of a nested dict, unique and in
    first-seen order."""
    import utool as ut
    if not isinstance(dict_, dict):
        return []
    nested_keys = [[key] + get_allkeys(val)
                   for key, val in dict_.items()]
    return ut.unique_ordered(ut.flatten(nested_keys))
def traverse_path(start, end, seen_, allkeys, mat):
    """
    Recursively follows nonzero entries of adjacency matrix ``mat`` from
    ``start``, building a nested dict of reachable keys.

    Args:
        start: key to expand
        end: unused; passed through the recursion (reserved)
        seen_ (set): cycle guard, currently disabled (see commented code)
        allkeys (list): key ordering that indexes the rows/cols of ``mat``
        mat (ndarray): adjacency matrix over ``allkeys``

    Returns:
        dict or None: mapping of child keys to their subtrees, None for a leaf

    NOTE(review): with the seen_ bookkeeping commented out, a cyclic ``mat``
    would recurse without bound -- confirm callers only pass acyclic input.
    """
    import utool as ut
    if seen_ is None:
        seen_ = set([])
    index = allkeys.index(start)
    # columns with a nonzero entry are the children of ``start``
    sub_indexes = np.where(mat[index])[0]
    if len(sub_indexes) > 0:
        subkeys = ut.take(allkeys, sub_indexes)
        # subkeys_ = ut.take(allkeys, sub_indexes)
        # subkeys = [subkey for subkey in subkeys_
        #            if subkey not in seen_]
        # for sk in subkeys:
        #     seen_.add(sk)
        if len(subkeys) > 0:
            return {subkey: traverse_path(subkey, end, seen_, allkeys, mat)
                    for subkey in subkeys}
    return None
def reverse_path(dict_, root, child_to_parents):
    """
    Reverses a nested ancestor-path dict (as produced by paths_to_root) so
    it is rooted at ``root`` and descends toward the original leaves.

    Args:
        dict_ (dict): nested path dict to reverse
        root: node the reversed tree should start from
        child_to_parents (dict): maps each node to its list of parents

    Returns:
        dict: reversed nested path dict

    CommandLine:
        python -m utool.util_graph --exec-reverse_path --show
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> child_to_parents = {
        >>>     'chip': ['dummy_annot'],
        >>>     'chipmask': ['dummy_annot'],
        >>>     'descriptor': ['keypoint'],
        >>>     'fgweight': ['keypoint', 'probchip'],
        >>>     'keypoint': ['chip'],
        >>>     'notch': ['dummy_annot'],
        >>>     'probchip': ['dummy_annot'],
        >>>     'spam': ['fgweight', 'chip', 'keypoint']
        >>> }
        >>> to_root = {
        >>>     'fgweight': {
        >>>         'keypoint': {
        >>>             'chip': {
        >>>                 'dummy_annot': None,
        >>>             },
        >>>         },
        >>>         'probchip': {
        >>>             'dummy_annot': None,
        >>>         },
        >>>     },
        >>> }
        >>> reversed_ = reverse_path(to_root, 'dummy_annot', child_to_parents)
        >>> result = ut.repr3(reversed_)
        >>> print(result)
        {
            'dummy_annot': {
                'chip': {
                    'keypoint': {
                        'fgweight': None,
                    },
                },
                'probchip': {
                    'fgweight': None,
                },
            },
        }
    """
    # Hacky but illustrative
    # TODO; implement non-hacky version
    # Build an adjacency matrix over all keys, with parent -> child ones,
    # then re-traverse it starting from ``root``.
    allkeys = get_allkeys(dict_)
    mat = np.zeros((len(allkeys), len(allkeys)))
    for key in allkeys:
        if key != root:
            for parent in child_to_parents[key]:
                rx = allkeys.index(parent)
                cx = allkeys.index(key)
                mat[rx][cx] = 1
    end = None
    seen_ = set([])
    reversed_ = {root: traverse_path(root, end, seen_, allkeys, mat)}
    return reversed_
def get_levels(dict_, n=0, levels=None):
    r"""
    DEPRECATE

    Collects the keys of a nested dict into one list per nesting depth.

    Args:
        dict_ (dict): a dictionary
        n (int): current depth (default = 0)
        levels (list): accumulator used by the recursion (default = None)

    Returns:
        list: one list of keys per depth level

    CommandLine:
        python -m utool.util_graph --test-get_levels --show
        python3 -m utool.util_graph --test-get_levels --show
    """
    if levels is None:
        accum = [[] for _ in range(dict_depth(dict_))]
    else:
        accum = levels
    if dict_ is None:
        return []
    for key in dict_.keys():
        accum[n].append(key)
    for subdict in dict_.values():
        get_levels(subdict, n + 1, accum)
    return accum
def longest_levels(levels_):
    r"""
    Keeps each item only at the deepest level where it occurs.

    Args:
        levels_ (list): list of lists of items

    Returns:
        list: filtered levels

    CommandLine:
        python -m utool.util_graph --exec-longest_levels --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> levels_ = [
        >>>     ['dummy_annot'],
        >>>     ['chip', 'probchip'],
        >>>     ['keypoint', 'fgweight'],
        >>>     ['fgweight'],
        >>> ]
        >>> new_levels = longest_levels(levels_)
        >>> print('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
    """
    # deepest-occurrence filtering is shallowest-occurrence filtering on
    # the reversed level list
    return shortest_levels(levels_[::-1])[::-1]
def shortest_levels(levels_):
    r"""
    Keeps each item only at the shallowest level where it occurs; levels
    left empty are dropped.

    Args:
        levels_ (list): list of lists of items

    Returns:
        list: filtered levels

    CommandLine:
        python -m utool.util_graph --exec-shortest_levels --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> levels_ = [
        >>>     ['dummy_annot'],
        >>>     ['chip', 'probchip'],
        >>>     ['keypoint', 'fgweight'],
        >>>     ['fgweight'],
        >>> ]
        >>> new_levels = shortest_levels(levels_)
        >>> print('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
    """
    seen = set()
    new_levels = []
    for level in levels_:
        fresh = [item for item in level if item not in seen]
        seen.update(fresh)
        if fresh:
            new_levels.append(fresh)
    return new_levels
def simplify_graph(graph):
    """
    strips out everything but connectivity

    Nodes are relabeled to their index in sorted order and all node/edge
    attributes are dropped; the graph class (di/multi) is preserved.

    Args:
        graph (nx.Graph):

    Returns:
        nx.Graph: new_graph

    CommandLine:
        python3 -m utool.util_graph simplify_graph --show
        python2 -m utool.util_graph simplify_graph --show
    """
    import utool as ut
    nodes = sorted(list(graph.nodes()))
    node_lookup = ut.make_index_lookup(nodes)
    new_nodes = ut.take(node_lookup, nodes)
    if graph.is_multigraph():
        # keep multi-edge keys, but reset each edge's data dict
        new_edges = [(node_lookup[u], node_lookup[v], k, {})
                     for u, v, k in graph.edges(keys=True)]
    else:
        new_edges = [(node_lookup[u], node_lookup[v])
                     for u, v in graph.edges()]
    new_graph = graph.__class__()
    new_graph.add_nodes_from(new_nodes)
    new_graph.add_edges_from(new_edges)
    return new_graph
def subgraph_from_edges(G, edge_list, ref_back=True):
    """
    Creates a networkx graph that is a subgraph of G
    defined by the list of edges in edge_list.

    Requires G to be a networkx MultiGraph or MultiDiGraph
    edge_list is a list of edges in either (u,v) or (u,v,d) form
    where u and v are nodes comprising an edge,
    and d would be a dictionary of edge attributes

    ref_back determines whether the created subgraph refers to back
    to the original graph and therefore changes to the subgraph's
    attributes also affect the original graph, or if it is to create a
    new copy of the original graph.

    References:
        http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges
    """
    # TODO: support multi-di-graph
    sub_nodes = list({node for edge in edge_list for node in edge[0:2]})
    multi_edge_list = [edge[0:3] for edge in edge_list]
    G_sub = G.subgraph(sub_nodes)
    if not ref_back:
        G_sub = G_sub.copy()
    # drop any edge between the kept nodes that was not explicitly requested
    for edge in G_sub.edges(keys=True):
        if edge not in multi_edge_list:
            G_sub.remove_edge(*edge)
    return G_sub
def nx_node_dict(G):
    """Returns the node-attribute mapping of ``G`` across networkx 1.x
    (``G.node``) and 2.x (``G.nodes``)."""
    if nx.__version__.startswith('1'):
        return getattr(G, 'node')
    return G.nodes
def all_multi_paths(graph, source, target, data=False):
    r"""
    Returns specific paths along multi-edges from the source to this table.
    Multipaths are identified by edge keys.

    Returns all paths from source to target. This function treats multi-edges
    as distinct and returns the key value in each edge tuple that defines a
    path; each path is a list of (u, v, key[, data]) tuples.
    """
    return list(nx_all_simple_edge_paths(graph, source, target,
                                         keys=True, data=data))
def reverse_path_edges(edge_list):
    """Reverses a path of edges: each edge is flipped (u, v) -> (v, u),
    any extra tuple entries are kept, and the edge order is reversed."""
    return [(edge[1], edge[0]) + tuple(edge[2:])
            for edge in reversed(edge_list)]
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
    """Produce edges in a breadth-first-search starting at source.

    Args:
        G (nx.MultiGraph): graph with an ``edges_iter`` method (nx 1.x API)
        source: start node
        reverse (bool): traverse the reversed graph
        keys (bool): include multi-edge keys in yielded edges
        data (bool): include edge data in yielded edges

    Yields:
        edge tuples in BFS order
    -----
    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
    by D. Eppstein, July 2004.
    """
    from collections import deque
    from functools import partial
    if reverse:
        G = G.reverse()
    edges_iter = partial(G.edges_iter, keys=keys, data=data)
    # FIX: removed a stray debug statement that materialized the edges of a
    # hard-coded 'multitest' node on every call.
    visited_nodes = set([source])
    queue = deque([(source, edges_iter(source))])
    while queue:
        parent, edges = queue[0]
        try:
            edge = next(edges)
            yield edge
            # edge[1] is the child regardless of keys/data flags
            child = edge[1]
            if child not in visited_nodes:
                visited_nodes.add(child)
                queue.append((child, edges_iter(child)))
        except StopIteration:
            queue.popleft()
def dfs_conditional(G, source, state, can_cross):
    """
    Iterative DFS that only crosses an edge when ``can_cross`` allows it.

    Args:
        G (nx.Graph): graph to traverse
        source: start node
        state: arbitrary traversal state threaded through ``can_cross``
        can_cross (callable): ``can_cross(G, edge, state) -> (flag, new_state)``;
            the edge is traversed only when ``flag`` is True

    Yields:
        nodes reachable from ``source`` under the crossing condition

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *
        >>> G = nx.Graph()
        >>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5)])
        >>> G.adj[2][3]['lava'] = True
        >>> G.adj[3][4]['lava'] = True
        >>> def can_cross(G, edge, state):
        >>>     # can only cross lava once, then your lava protection wears off
        >>>     data = G.get_edge_data(*edge)
        >>>     lava = int(data.get('lava', False))
        >>>     if not lava or state == 0:
        >>>         return True, state + lava
        >>>     return False, lava
        >>> assert 5 not in dfs_conditional(G, 1, state=0, can_cross=can_cross)
        >>> G.adj[3][4]['lava'] = False
        >>> assert 5 in dfs_conditional(G, 1, state=0, can_cross=can_cross)
    """
    # stack based version
    visited = {source}
    # each stack frame carries the state that was current when it was pushed
    stack = [(source, iter(G[source]), state)]
    while stack:
        parent, children, state = stack[-1]
        try:
            child = next(children)
            if child not in visited:
                edge = (parent, child)
                flag, new_state = can_cross(G, edge, state)
                if flag:
                    yield child
                    # note: uncrossable children stay unvisited and may be
                    # reached later via a different edge
                    visited.add(child)
                    stack.append((child, iter(G[child]), new_state))
        except StopIteration:
            stack.pop()
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
                    yield_nodes=True, yield_if=None,
                    continue_if=None, visited_nodes=None,
                    yield_source=False):
    """
    Produce edges in a breadth-first-search starting at source, but only return
    nodes that satisfiy a condition, and only iterate past a node if it
    satisfies a different condition.

    conditions are callables that take (G, child, edge) and return true or false

    Args:
        G (nx.Graph): graph to traverse
        source: start node
        reverse (bool): traverse the reversed graph when supported
        keys (bool): include multi-edge keys (non-Graph classes only)
        data (bool): include edge data in the edges considered
        yield_nodes (bool): yield nodes instead of edges
        yield_if (callable): predicate deciding whether a child/edge is yielded
        continue_if (callable): predicate deciding whether to expand past a child
        visited_nodes (set): nodes to treat as already visited
        yield_source (bool): also yield ``source`` itself (node mode only)

    CommandLine:
        python -m utool.util_graph bfs_conditional

    Example:
        >>> # DISABLE_DOCTEST
        >>> import networkx as nx
        >>> import utool as ut
        >>> G = nx.Graph()
        >>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)])
        >>> continue_if = lambda G, child, edge: True
        >>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False))
        >>> print(result)
        [(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)]

    Example:
        >>> # ENABLE_DOCTEST
        >>> import networkx as nx
        >>> import utool as ut
        >>> G = nx.Graph()
        >>> continue_if = lambda G, child, edge: (child % 2 == 0)
        >>> yield_if = lambda G, child, edge: (child % 2 == 1)
        >>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10),
        >>>                   (4, 3), (3, 6),
        >>>                   (0, 2), (2, 4), (4, 6), (6, 10)])
        >>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if,
        >>>                                  yield_if=yield_if))
        >>> print(result)
        [1, 3, 5]
    """
    if reverse and hasattr(G, 'reverse'):
        G = G.reverse()
    # plain Graphs do not accept a ``keys`` argument
    if isinstance(G, nx.Graph):
        neighbors = functools.partial(G.edges, data=data)
    else:
        neighbors = functools.partial(G.edges, keys=keys, data=data)
    queue = collections.deque([])
    if visited_nodes is None:
        visited_nodes = set([])
    else:
        # copy so the caller's set is not mutated
        visited_nodes = set(visited_nodes)
    if source not in visited_nodes:
        if yield_nodes and yield_source:
            yield source
        visited_nodes.add(source)
        new_edges = neighbors(source)
        if isinstance(new_edges, list):
            new_edges = iter(new_edges)
        queue.append((source, new_edges))
    while queue:
        parent, edges = queue[0]
        for edge in edges:
            child = edge[1]
            if yield_nodes:
                if child not in visited_nodes:
                    if yield_if is None or yield_if(G, child, edge):
                        yield child
            else:
                # edge mode yields even for already-visited children
                if yield_if is None or yield_if(G, child, edge):
                    yield edge
            if child not in visited_nodes:
                visited_nodes.add(child)
                # Add new children to queue if the condition is satisfied
                if continue_if is None or continue_if(G, child, edge):
                    new_edges = neighbors(child)
                    if isinstance(new_edges, list):
                        new_edges = iter(new_edges)
                    queue.append((child, new_edges))
        queue.popleft()
def color_nodes(graph, labelattr='label', brightness=.878,
                outof=None, sat_adjust=None):
    """ Colors edges and nodes by nid

    Assigns one color per unique value of ``labelattr`` and writes it to
    each node's 'color' attribute (then normalized for agraph output).

    Args:
        graph (nx.Graph): graph to color in place
        labelattr (str): node attribute used to group colors (default = 'label')
        brightness (float): brightness passed to the color generator
        outof (int): if given, allocate this many colors and map labels into
            them; label values are assumed to be ints here -- TODO confirm
        sat_adjust: optional saturation adjustment applied to every color
    """
    try:
        import plottool_ibeis as pt
    except ImportError:
        import plottool as pt
    import utool as ut
    node_to_lbl = nx.get_node_attributes(graph, labelattr)
    unique_lbls = sorted(set(node_to_lbl.values()))
    ncolors = len(unique_lbls)
    if outof is None:
        if (ncolors) == 1:
            unique_colors = [pt.LIGHT_BLUE]
        elif (ncolors) == 2:
            # https://matplotlib.org/examples/color/named_colors.html
            unique_colors = ['royalblue', 'orange']
            unique_colors = list(map(pt.color_funcs.ensure_base01, unique_colors))
        else:
            unique_colors = pt.distinct_colors(ncolors, brightness=brightness)
    else:
        unique_colors = pt.distinct_colors(outof, brightness=brightness)
    if sat_adjust:
        unique_colors = [
            pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust)
            for c in unique_colors
        ]
    # Find edges and aids strictly between two nids
    if outof is None:
        lbl_to_color = ut.dzip(unique_lbls, unique_colors)
    else:
        # reserve gray for the 0 slot; shift labels so the smallest maps to 1
        gray = pt.color_funcs.ensure_base01('lightgray')
        unique_colors = [gray] + unique_colors
        offset = max(1, min(unique_lbls)) - 1
        node_to_lbl = ut.map_vals(lambda nid: max(0, nid - offset), node_to_lbl)
        lbl_to_color = ut.dzip(range(outof + 1), unique_colors)
    node_to_color = ut.map_vals(lbl_to_color, node_to_lbl)
    nx.set_node_attributes(graph, name='color', values=node_to_color)
    ut.nx_ensure_agraph_color(graph)
def graph_info(graph, ignore=None, stats=False, verbose=False):
    """Summarize a graph's size and its node/edge attribute usage.

    Args:
        graph (nx.Graph): graph to summarize
        ignore (list): attribute keys to exclude from the histograms
        stats (bool): if True, report pandas ``describe()`` statistics of the
            attribute values instead of key-occurrence histograms
        verbose (bool): if True, print the summary

    Returns:
        OrderedDict: summary of graph shape, attributes, and node types
    """
    import utool as ut
    node_dict = nx_node_dict(graph)
    node_attrs = list(node_dict.values())
    edge_attrs = list(ut.take_column(graph.edges(data=True), 2))
    if stats:
        import utool
        with utool.embed_on_exception_context:
            import pandas as pd
            node_df = pd.DataFrame(node_attrs)
            edge_df = pd.DataFrame(edge_attrs)
            if ignore is not None:
                # NOTE(review): delete_dict_keys is applied to DataFrames
                # here — presumably it drops columns; confirm utool semantics
                ut.delete_dict_keys(node_df, ignore)
                ut.delete_dict_keys(edge_df, ignore)
            # Not really histograms anymore
            try:
                node_attr_hist = node_df.describe().to_dict()
            except ValueError:
                # BUGFIX: this branch was a bare expression, which left
                # ``node_attr_hist`` undefined and raised NameError later
                node_attr_hist = {}
            try:
                edge_attr_hist = edge_df.describe().to_dict()
            except ValueError:
                edge_attr_hist = {}
            key_order = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
            node_attr_hist = ut.map_dict_vals(lambda x: ut.order_dict_by(x, key_order), node_attr_hist)
            edge_attr_hist = ut.map_dict_vals(lambda x: ut.order_dict_by(x, key_order), edge_attr_hist)
    else:
        # Histogram of how many nodes/edges carry each attribute key
        node_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in node_attrs]))
        edge_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in edge_attrs]))
        if ignore is not None:
            ut.delete_dict_keys(edge_attr_hist, ignore)
            ut.delete_dict_keys(node_attr_hist, ignore)
    node_type_hist = ut.dict_hist(list(map(type, graph.nodes())))
    info_dict = ut.odict([
        ('directed', graph.is_directed()),
        ('multi', graph.is_multigraph()),
        ('num_nodes', len(graph)),
        ('num_edges', len(list(graph.edges()))),
        ('edge_attr_hist', ut.sort_dict(edge_attr_hist)),
        ('node_attr_hist', ut.sort_dict(node_attr_hist)),
        ('node_type_hist', ut.sort_dict(node_type_hist)),
        ('graph_attrs', graph.graph),
        ('graph_name', graph.name),
    ])
    if verbose:
        print(ut.repr3(info_dict))
    return info_dict
def get_graph_bounding_box(graph):
    """Axis-aligned bounding box (x, y, w, h) enclosing every node's extent.

    Requires each node to have 'pos' (center) and 'size' (width/height)
    attributes; raises if any node is missing them.
    """
    try:
        import vtool_ibeis as vt
    except ImportError:
        import vtool as vt
    node_list = list(graph.nodes())
    sizes = nx_gen_node_values(graph, 'size', node_list)
    centers = nx_gen_node_values(graph, 'pos', node_list)
    # Per-node extents as (tl_x, br_x, tl_y, br_y) rows
    extents = np.array([
        vt.extent_from_bbox(vt.bbox_from_center_wh(center, size))
        for center, size in zip(centers, sizes)
    ])
    min_x, max_x, min_y, max_y = extents.T
    overall_extent = (min_x.min(), max_x.max(), min_y.min(), max_y.max())
    return vt.bbox_from_extent(overall_extent)
def translate_graph(graph, t_xy):
    """Shift all layout positions of ``graph`` by the offset ``t_xy`` (in place).

    Translates the node 'pos' attribute and every graphviz edge layout
    attribute (control points, label positions, endpoints).
    """
    import utool as ut
    node_layout_attrs = ['pos']
    for attr_key in node_layout_attrs:
        positions = nx.get_node_attributes(graph, attr_key)
        moved = {node: xy + t_xy for node, xy in positions.items()}
        nx.set_node_attributes(graph, name=attr_key, values=moved)
    # Drop None-valued edge attrs first so translation only sees real values
    ut.nx_delete_None_edge_attr(graph)
    edge_layout_attrs = ['ctrl_pts', 'end_pt', 'head_lp', 'lp', 'start_pt', 'tail_lp']
    for attr_key in edge_layout_attrs:
        positions = nx.get_edge_attributes(graph, attr_key)
        moved = {
            edge: (xy + t_xy if xy is not None else xy)
            for edge, xy in positions.items()
        }
        nx.set_edge_attributes(graph, name=attr_key, values=moved)
def translate_graph_to_origin(graph):
    """Shift ``graph`` in place so its bounding box starts at (0, 0)."""
    bbox = get_graph_bounding_box(graph)
    offset = (-bbox[0], -bbox[1])
    translate_graph(graph, offset)
def stack_graphs(graph_list, vert=False, pad=None):
    """Compose several graphs into one, laid out side by side.

    Each input graph is copied, moved to the origin, then offset along the
    stacking axis (horizontal by default, vertical when ``vert``) with the
    perpendicular axis centered. Nodes are pinned so a later graphviz layout
    keeps these positions.

    Args:
        graph_list (list): graphs to stack (inputs are not modified)
        vert (bool): stack top-to-bottom instead of left-to-right
        pad (float): gap between graphs; defaults to half the mean extent

    Returns:
        nx.Graph: composition of the translated copies
    """
    import utool as ut
    copies = [g.copy() for g in graph_list]
    for g in copies:
        translate_graph_to_origin(g)
    bboxes = [get_graph_bounding_box(g) for g in copies]
    # bbox is (x, y, w, h): stack along height when vertical, else width
    stack_axis, center_axis = (3, 2) if vert else (2, 3)
    stack_sizes = np.array([bbox[stack_axis] for bbox in bboxes])
    center_sizes = np.array([bbox[center_axis] for bbox in bboxes])
    if pad is None:
        pad = np.mean(stack_sizes) / 2
    stack_offsets = ut.cumsum([0] + [size + pad for size in stack_sizes[:-1]])
    largest = max(center_sizes)
    center_offsets = [(largest - size) / 2 for size in center_sizes]
    if vert:
        offsets = [(c, s) for s, c in zip(stack_offsets, center_offsets)]
    else:
        offsets = [(s, c) for s, c in zip(stack_offsets, center_offsets)]
    for g, t_xy in zip(copies, offsets):
        translate_graph(g, t_xy)
        nx.set_node_attributes(g, name='pin', values='true')
    return nx.compose_all(copies)
def nx_contracted_nodes(G, u, v, self_loops=True, inplace=False):
    """
    copy of networkx function with inplace modification
    TODO: commit to networkx

    Contracts node ``v`` into node ``u``: every edge incident to ``v`` is
    re-attached to ``u``, ``v`` is removed, and ``v``'s attribute dict is
    recorded under ``u``'s 'contraction' attribute.

    Args:
        G (nx.Graph): graph to operate on
        u: node that absorbs ``v``
        v: node that is removed
        self_loops (bool): if False, edges between ``u`` and ``v`` are
            dropped instead of becoming self loops on ``u``
        inplace (bool): if True, mutate ``G`` directly instead of copying

    Returns:
        nx.Graph: ``G`` itself when ``inplace``, else a modified copy
    """
    import itertools as it
    if G.is_directed():
        in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
                    if self_loops or w != u)
        out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
                     if self_loops or w != u)
        new_edges = it.chain(in_edges, out_edges)
    else:
        new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
                     if self_loops or w != u)
    if inplace:
        H = G
        # must materialize the edge generators before mutating G in place
        new_edges = list(new_edges)
    else:
        H = G.copy()
    node_dict = nx_node_dict(H)
    v_data = node_dict[v]
    H.remove_node(v)
    H.add_edges_from(new_edges)
    if 'contraction' in node_dict[u]:
        node_dict[u]['contraction'][v] = v_data
    else:
        node_dict[u]['contraction'] = {v: v_data}
    return H
def approx_min_num_components(nodes, negative_edges):
    """
    Find approximate minimum number of connected components possible
    Each edge represents that two nodes must be separated

    This code doesn't solve the problem. The problem is NP-complete and
    reduces to minimum clique cover (MCC). This is only an approximate
    solution. Not sure what the approximation ratio is.

    CommandLine:
        python -m utool.util_graph approx_min_num_components

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> edges = [(1, 2), (2, 3), (3, 1),
        >>>          (4, 5), (5, 6), (6, 4),
        >>>          (7, 8), (8, 9), (9, 7),
        >>>          (1, 4), (4, 7), (7, 1),
        >>>         ]
        >>> g_pos = nx.Graph()
        >>> g_pos.add_edges_from(edges)
        >>> g_neg = nx.complement(g_pos)
        >>> #import plottool as pt
        >>> #pt.qt4ensure()
        >>> #pt.show_nx(g_pos)
        >>> #pt.show_nx(g_neg)
        >>> negative_edges = g_neg.edges()
        >>> nodes = [1, 2, 3, 4, 5, 6, 7]
        >>> negative_edges = [(1, 2), (2, 3), (4, 5)]
        >>> result = approx_min_num_components(nodes, negative_edges)
        >>> print(result)
        2
    """
    import utool as ut
    num = 0
    # Build the "must separate" graph
    g_neg = nx.Graph()
    g_neg.add_nodes_from(nodes)
    g_neg.add_edges_from(negative_edges)
    # Collapse all nodes with degree 0; they can join any component
    if nx.__version__.startswith('2'):
        deg0_nodes = [n for n, d in g_neg.degree() if d == 0]
    else:
        deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
    for u, v in ut.itertwo(deg0_nodes):
        nx_contracted_nodes(g_neg, v, u, inplace=True)
    # Initialize unused nodes to be everything
    unused = list(g_neg.nodes())
    # complement of the graph contains all possible positive edges
    g_pos = nx.complement(g_neg)
    # (an unreachable `if False:` clique-removal branch was removed here)
    # Iterate until we have used all nodes
    while len(unused) > 0:
        # Seed a new "minimum component"
        num += 1
        # Grab a random unused node n1
        #idx1 = np.random.randint(0, len(unused))
        idx1 = 0
        n1 = unused[idx1]
        unused.remove(n1)
        neigbs = list(g_pos.neighbors(n1))
        neigbs = ut.isect(neigbs, unused)
        while len(neigbs) > 0:
            # Find node n2, that n1 could be connected to
            #idx2 = np.random.randint(0, len(neigbs))
            idx2 = 0
            n2 = neigbs[idx2]
            unused.remove(n2)
            # Collapse negative information of n1 and n2
            g_neg = nx.contracted_nodes(g_neg, n1, n2)
            # Compute new possible positive edges
            g_pos = nx.complement(g_neg)
            # Iterate until n1 has no more possible connections
            neigbs = list(g_pos.neighbors(n1))
            neigbs = ut.isect(neigbs, unused)
    print('num = %r' % (num,))
    return num
def nx_mincut_edges_weighted(G, s, t, capacity='weight'):
    """Return the edges crossing a minimum weighted s-t cut of ``G``.

    Edges are reported oriented from the s-side partition to the t-side.
    """
    # http://stackoverflow.com/questions/33332462/minimum-s-t-edge-cut-which-takes-edge-weight-into-consideration
    _, (s_side, t_side) = nx.minimum_cut(G, s, t, capacity=capacity)
    return [
        (n1, n2)
        for n1 in s_side
        for n2 in t_side
        if G.has_edge(n1, n2)
    ]
def weighted_diamter(graph, weight=None):
    """Diameter of ``graph`` using (optionally weighted) shortest paths.

    Args:
        graph (nx.Graph): connected graph
        weight (str): edge attribute holding the weight; unweighted if None

    Returns:
        the maximum eccentricity over all nodes
    """
    if weight is None:
        dist_result = nx.all_pairs_shortest_path_length(graph)
    else:
        dist_result = nx.all_pairs_dijkstra_path_length(graph, weight=weight)
    # networkx 1.x returns a dict; 2.x returns an iterator of (node, dists)
    if isinstance(dist_result, dict):
        node_dists = dist_result.items()
    else:
        node_dists = dist_result
    return max(max(dists.values()) for _node, dists in node_dists)
def mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
    r"""
    PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)

    Args:
        graph (nx.Graph): input graph
        max_cost (float): maximum weighted diameter of the graph
        weight (str): key of the edge weight attribute
        cost (str): key of the edge cost attribute
        candidates (list): set of non-edges, optional, defaults
            to the complement of the graph

    Returns:
        None: if no solution exists
        list: minimum cost edges if solution exists

    Notes:
        We are given a graph G = (V, E) with an edge weight function w, an edge
        cost function c, and a maximum cost B.

        The goal is to find a set of candidate non-edges F.

        Let x[e] in {0, 1} denote if a non-edge e is excluded or included.

        minimize sum(c(e) * x[e] for e in F)
        such that
        weighted_diamter(graph.union({e for e in F if x[e]})) <= B

    References:
        https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
        http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
        http://dl.acm.org/citation.cfm?id=2953882

    Notes:
        There is a 4-Approximation of the BCMD problem
        Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)

        This algorithm uses a clustering approach to find a set C, of B + 1
        cluster centers.  Then we create a minimum height rooted tree, T = (U
        \subseteq V, D) so that C \subseteq U.  This tree T approximates an
        optimal B-augmentation.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> graph = nx.Graph()
        >>> if nx.__version__.startswith('1'):
        >>>     nx.add_path = nx.Graph.add_path
        >>> nx.add_path(graph, range(6))
        >>> #cost_func = lambda e: e[0] + e[1]
        >>> cost_func = lambda e: 1
        >>> weight_func = lambda e: (e[0]) / e[1]
        >>> comp_graph = nx.complement(graph)
        >>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
        >>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
        >>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
        >>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
        >>> candidates = list(comp_graph.edges(data=True))
        >>> max_cost = 2
        >>> cost = 'cost'
        >>> weight = 'weight'
        >>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
        >>> print('best_edges = %r' % (best_edges,))
        >>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
        >>> print('soln_edges = %r' % (soln_edges,))
    """
    import utool as ut
    if candidates is None:
        # BUGFIX: nx.Graph has no .complement() method; use nx.complement()
        candidates = list(nx.complement(graph).edges(data=True))

    def augment_add(graph, edges):
        # Copy of ``graph`` with ``edges`` added
        aug_graph = graph.copy()
        aug_graph.add_edges_from(edges)
        return aug_graph

    def solution_energy(chosen_edges):
        # Objective: number of edges, or their total weight if weighted
        if weight is None:
            return len(chosen_edges)
        else:
            return sum(d[weight] for (u, v, d) in chosen_edges)

    variable_basis = [(0, 1) for _ in candidates]
    best_energy = np.inf
    best_soln = None
    soln_generator = ut.product(*variable_basis)
    # Each variable is binary, so the search space has exactly 2 ** n points.
    # (The original used ``reduce``, which is not imported under Python 3.)
    length = 2 ** len(variable_basis)
    if length > 3000:
        # Let the user know that it might take some time to find a solution
        soln_generator = ut.ProgIter(soln_generator, label='BruteForce BCMD',
                                     length=length)
    # Brute force solution
    for x in soln_generator:
        chosen_edges = ut.compress(candidates, x)
        aug_graph = augment_add(graph, chosen_edges)
        total_cost = weighted_diamter(aug_graph, weight=cost)
        energy = solution_energy(chosen_edges)
        if total_cost <= max_cost:
            if energy < best_energy:
                best_energy = energy
                best_soln = x
    if best_soln is None:
        # BUGFIX: documented contract returns None when no feasible
        # augmentation exists (previously crashed in ut.compress)
        return None
    best_edges = ut.compress(candidates, best_soln)
    return best_edges
def greedy_mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
    """Greedy two-phase heuristic for the BCMD problem.

    See :func:`mincost_diameter_augment` for the problem statement. Phase 1
    adds the candidate edge with the best diameter improvement until the
    weighted diameter is at most ``max_cost``; phase 2 prunes edges whose
    removal does not worsen the solution.

    Args:
        graph (nx.Graph): input graph
        max_cost (float): maximum weighted diameter of the augmented graph
        candidates (list): candidate non-edges with data dicts; defaults to
            all edges of the graph complement
        weight (str): key of the edge weight attribute (energy objective)
        cost (str): key of the edge cost attribute (diameter weight)

    Returns:
        None: if even adding every candidate cannot satisfy ``max_cost``
        list: chosen candidate edges otherwise
    """
    import warnings

    def solution_cost(graph):
        # Feasibility measure: weighted diameter of the current solution
        return weighted_diamter(graph, weight=cost)

    def solution_energy(chosen_edges):
        # Objective: number of edges, or their total weight if weighted
        if weight is None:
            return len(chosen_edges)
        else:
            return sum(d[weight] for (u, v, d) in chosen_edges)

    def augment_add(graph, edges):
        aug_graph = graph.copy()
        aug_graph.add_edges_from(edges)
        return aug_graph

    def augment_remove(graph, edges):
        aug_graph = graph.copy()
        aug_graph.remove_edges_from(edges)
        return aug_graph

    def greedy_improvement(soln_graph, available_candidates, base_cost=None):
        """
        Choose edge that results in the best improvement
        """
        best_loss = None
        best_cost = None
        best_energy = None
        best_e = None
        best_graph = None
        for e in available_candidates:
            aug_graph = augment_add(soln_graph, [e])
            aug_cost = solution_cost(aug_graph)
            aug_energy = solution_energy([e])
            # We don't want to go over if possible
            aug_loss = max(aug_cost - max_cost, 0)
            if best_loss is None or aug_loss <= best_loss:
                if best_energy is None or aug_energy < best_energy:
                    best_loss = aug_loss
                    best_e = e
                    best_graph = aug_graph
                    best_cost = aug_cost
                    best_energy = aug_energy
        if best_e is None:
            return None
        else:
            return best_cost, best_graph, best_energy, best_e

    if candidates is None:
        # BUGFIX: a None default previously crashed at ``candidates[:]``;
        # default to all non-edges, consistent with the brute-force variant
        candidates = list(nx.complement(graph).edges(data=True))

    base_cost = solution_cost(graph)
    full_graph = augment_add(graph, candidates)
    full_cost = solution_cost(full_graph)
    if full_cost > max_cost:
        # BUGFIX: previously fell through to ``return soln_edges`` with the
        # name undefined (NameError); return None per the BCMD contract
        warnings.warn('no feasible solution')
        return None

    soln_graph = graph.copy()
    available_candidates = candidates[:]
    soln_edges = []
    soln_energy = 0
    soln_cost = base_cost
    # Phase 1: add edges to the solution until the cost is feasible
    while soln_cost > max_cost and len(available_candidates):
        tup = greedy_improvement(soln_graph, available_candidates, soln_cost)
        if tup is None:
            warnings.warn('no improvement found')
            break
        soln_cost, soln_graph, best_energy, best_e = tup
        soln_energy += best_energy
        soln_edges.append(best_e)
        available_candidates.remove(best_e)
    # Phase 2: remove edges while maintaining feasibility
    for e in soln_edges[:]:
        aug_graph = augment_remove(soln_graph, [e])
        aug_cost = solution_cost(aug_graph)
        if aug_cost <= soln_cost:
            soln_cost = aug_cost
            soln_graph = aug_graph
            soln_edges.remove(e)
    return soln_edges
if __name__ == '__main__':
    r"""
    CommandLine:
        python -m utool.util_graph --allexamples
    """
    # Run this module's doctests when executed as a script
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| apache-2.0 |
bdrillard/spark | python/pyspark/sql/functions.py | 1 | 143697 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal, \
_create_column_from_name
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
from pyspark.sql.utils import to_str
# Note to developers: all of PySpark functions here take string as column names whenever possible.
# Namely, if columns are referred as arguments, they can be always both Column or string,
# even though there might be few exceptions for legacy or inevitable reasons.
# If you are fixing other language APIs together, also please note that Scala side is not the case
# since it requires to make every single overridden definition.
def _create_function(name, doc=""):
    """Build a PySpark function that forwards to the JVM ``functions.<name>``.

    The argument is passed through untouched unless it is a :class:`Column`,
    in which case its underlying Java column is used.
    """
    def wrapper(col):
        sc = SparkContext._active_spark_context
        arg = col._jc if isinstance(col, Column) else col
        return Column(getattr(sc._jvm.functions, name)(arg))
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def _create_function_over_column(name, doc=""):
    """Similar with `_create_function` but creates a PySpark function that takes a column
    (as string as well). This is mainly for PySpark functions to take strings as
    column names.
    """
    def wrapper(col):
        sc = SparkContext._active_spark_context
        jcol = _to_java_column(col)
        return Column(getattr(sc._jvm.functions, name)(jcol))
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def _wrap_deprecated_function(func, message):
    """Wrap ``func`` so every call first emits ``message`` as a DeprecationWarning."""
    def deprecated(col):
        warnings.warn(message, DeprecationWarning)
        return func(col)
    return functools.wraps(func)(deprecated)
def _create_binary_mathfunction(name, doc=""):
    """Create a two-argument math function dispatching to the JVM ``functions.<name>``.

    For legacy reasons each argument may be a Column, a column-name string,
    or any other value, which is implicitly converted to float.
    """
    def _coerce(arg):
        # Column -> Java column; string -> named column; otherwise -> float
        if isinstance(arg, Column):
            return arg._jc
        if isinstance(arg, basestring):
            return _create_column_from_name(arg)
        return float(arg)

    def wrapper(col1, col2):
        sc = SparkContext._active_spark_context
        jc = getattr(sc._jvm.functions, name)(_coerce(col1), _coerce(col2))
        return Column(jc)
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def _create_window_function(name, doc=''):
    """Create a zero-argument window function bound to the JVM ``functions.<name>``."""
    def wrapper():
        sc = SparkContext._active_spark_context
        return Column(getattr(sc._jvm.functions, name)())
    wrapper.__name__ = name
    wrapper.__doc__ = 'Window function: ' + doc
    return wrapper
def _options_to_str(options):
    """Convert every option value to its string form via ``to_str``; keys are kept as-is."""
    converted = {}
    for key, value in options.items():
        converted[key] = to_str(value)
    return converted
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions_over_column = {
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4_over_column = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values return before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear after non-null values',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6_over_column = {
# unary math functions
'stddev': 'Aggregate function: alias for stddev_samp.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: alias for var_samp.',
'var_samp': 'Aggregate function: returns the unbiased sample variance of' +
' the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1_over_column = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give me sequential numbers, making
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give me sequential numbers, making
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
# Materialize the functions declared in the tables above and install them in
# the module namespace, tagged via @since with the Spark version that
# introduced them.
for _name, _doc in _functions.items():
    globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_over_column.items():
    globals()[_name] = since(1.3)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_1_4_over_column.items():
    globals()[_name] = since(1.4)(_create_function_over_column(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
    globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
    globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6_over_column.items():
    globals()[_name] = since(1.6)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_2_1_over_column.items():
    globals()[_name] = since(2.1)(_create_function_over_column(_name, _doc))
# Deprecated wrappers re-wrap already-generated functions with a warning
for _name, _message in _functions_deprecated.items():
    globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
    globals()[_name] = since(2.4)(_create_function(_name, _doc))
# Avoid leaking loop variables into the public module namespace
del _name, _doc
@since(2.1)
def approx_count_distinct(col, rsd=None):
    """Aggregate function: returns a new :class:`Column` for approximate distinct count of
    column `col`.

    :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
        efficient to use :func:`countDistinct`

    >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
    [Row(distinct_ages=2)]
    """
    sc = SparkContext._active_spark_context
    if rsd is None:
        return Column(sc._jvm.functions.approx_count_distinct(_to_java_column(col)))
    return Column(sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd))
@since(1.6)
def broadcast(df):
    """Marks a DataFrame as small enough for use in broadcast joins."""
    sc = SparkContext._active_spark_context
    jdf = sc._jvm.functions.broadcast(df._jdf)
    return DataFrame(jdf, df.sql_ctx)
@since(1.4)
def coalesce(*cols):
    """Returns the first column that is not null.

    >>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
    >>> cDf.show()
    +----+----+
    |   a|   b|
    +----+----+
    |null|null|
    |   1|null|
    |null|   2|
    +----+----+

    >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
    +--------------+
    |coalesce(a, b)|
    +--------------+
    |          null|
    |             1|
    |             2|
    +--------------+

    >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
    +----+----+----------------+
    |   a|   b|coalesce(a, 0.0)|
    +----+----+----------------+
    |null|null|             0.0|
    |   1|null|             1.0|
    |null|   2|             0.0|
    +----+----+----------------+
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column)))
@since(1.6)
def corr(col1, col2):
    """Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
    and ``col2``.

    >>> a = range(20)
    >>> b = [2 * x for x in range(20)]
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(corr("a", "b").alias('c')).collect()
    [Row(c=1.0)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(2.0)
def covar_pop(col1, col2):
    """Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.

    >>> a = [1] * 10
    >>> b = [1] * 10
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(covar_pop("a", "b").alias('c')).collect()
    [Row(c=0.0)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(2.0)
def covar_samp(col1, col2):
    """Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.

    >>> a = [1] * 10
    >>> b = [1] * 10
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(covar_samp("a", "b").alias('c')).collect()
    [Row(c=0.0)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(1.3)
def countDistinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.

    >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]

    >>> df.agg(countDistinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    sc = SparkContext._active_spark_context
    first_jcol = _to_java_column(col)
    rest_jcols = _to_seq(sc, cols, _to_java_column)
    return Column(sc._jvm.functions.countDistinct(first_jcol, rest_jcols))
@since(1.3)
def first(col, ignorenulls=False):
    """Aggregate function: returns the first value in a group.

    The function by default returns the first values it sees. It will return the first non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.

    .. note:: The function is non-deterministic because its results depends on order of rows which
        may be non-deterministic after a shuffle.
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.first(_to_java_column(col), ignorenulls))
@since(2.0)
def grouping(col):
    """
    Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
    or not, returns 1 for aggregated or 0 for not aggregated in the result set.

    >>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
    +-----+--------------+--------+
    | name|grouping(name)|sum(age)|
    +-----+--------------+--------+
    | null|             1|       7|
    |Alice|             0|       2|
    |  Bob|             0|       5|
    +-----+--------------+--------+
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.grouping(_to_java_column(col)))
@since(2.0)
def grouping_id(*cols):
    """
    Aggregate function: returns the level of grouping, equals to
    (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
    .. note:: The list of columns should match with grouping columns exactly, or empty (means all
        the grouping columns).
    >>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
    +-----+-------------+--------+
    | name|grouping_id()|sum(age)|
    +-----+-------------+--------+
    | null|            1|       7|
    |Alice|            0|       2|
    |  Bob|            0|       5|
    +-----+-------------+--------+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.grouping_id(_to_seq(ctx, cols, _to_java_column)))
@since(1.6)
def input_file_name():
    """Creates a string column holding the file name of the current Spark task.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.input_file_name()
    return Column(jc)
@since(1.6)
def isnan(col):
    """An expression that is true if and only if the column value is NaN.
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.isnan(_to_java_column(col))
    return Column(jc)
@since(1.6)
def isnull(col):
    """An expression that is true if and only if the column value is null.
    >>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
    >>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.isnull(_to_java_column(col))
    return Column(jc)
@since(1.3)
def last(col, ignorenulls=False):
    """Aggregate function: returns the last value in a group.
    By default the last value encountered is returned. When ``ignorenulls`` is
    true, the last non-null value is returned instead; if every value is null,
    the result is null.
    .. note:: The function is non-deterministic because its results depends on order of rows
        which may be non-deterministic after a shuffle.
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.last(_to_java_column(col), ignorenulls))
@since(1.6)
def monotonically_increasing_id():
    """A column that generates monotonically increasing 64-bit integers.
    The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
    The current implementation puts the partition ID in the upper 31 bits, and the record number
    within each partition in the lower 33 bits. The assumption is that the data frame has
    less than 1 billion partitions, and each partition has less than 8 billion records.
    .. note:: The function is non-deterministic because its result depends on partition IDs.
    As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
    This expression would return the following IDs:
    0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
    >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
    >>> df0.select(monotonically_increasing_id().alias('id')).collect()
    [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.monotonically_increasing_id()
    return Column(jc)
@since(1.6)
def nanvl(col1, col2):
    """Returns col1 if it is not NaN, or col2 if col1 is NaN.
    Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
    """Generates a random column with independent and identically distributed (i.i.d.) samples
    from U[0.0, 1.0].
    .. note:: The function is non-deterministic in general case.
    >>> df.withColumn('rand', rand(seed=42) * 3).collect()
    [Row(age=2, name=u'Alice', rand=2.4052597283576684),
     Row(age=5, name=u'Bob', rand=2.3913904055683974)]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    # Py4J cannot pass None for the overloaded long argument, so dispatch here.
    jc = fns.rand() if seed is None else fns.rand(seed)
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
    """Generates a column with independent and identically distributed (i.i.d.) samples from
    the standard normal distribution.
    .. note:: The function is non-deterministic in general case.
    >>> df.withColumn('randn', randn(seed=42)).collect()
    [Row(age=2, name=u'Alice', randn=1.1027054481455365),
    Row(age=5, name=u'Bob', randn=0.7400395449950132)]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    # Py4J cannot pass None for the overloaded long argument, so dispatch here.
    jc = fns.randn() if seed is None else fns.randn(seed)
    return Column(jc)
@since(1.5)
def round(col, scale=0):
    """
    Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
    or at integral part when `scale` < 0.
    >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
    [Row(r=3.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.round(_to_java_column(col), scale)
    return Column(jc)
@since(2.0)
def bround(col, scale=0):
    """
    Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
    or at integral part when `scale` < 0.
    >>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
    [Row(r=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.bround(_to_java_column(col), scale)
    return Column(jc)
@since(1.5)
def shiftLeft(col, numBits):
    """Shift the given value numBits left.
    >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
    [Row(r=42)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.shiftLeft(_to_java_column(col), numBits)
    return Column(jc)
@since(1.5)
def shiftRight(col, numBits):
    """(Signed) shift the given value numBits right.
    >>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
    [Row(r=21)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.shiftRight(_to_java_column(col), numBits))
@since(1.5)
def shiftRightUnsigned(col, numBits):
    """Unsigned shift the given value numBits right.
    >>> df = spark.createDataFrame([(-42,)], ['a'])
    >>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
    [Row(r=9223372036854775787)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits))
@since(1.6)
def spark_partition_id():
    """A column for partition ID.
    .. note:: This is indeterministic because it depends on data partitioning and task scheduling.
    >>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
    [Row(pid=0), Row(pid=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.spark_partition_id()
    return Column(jc)
@since(1.5)
def expr(str):
    """Parses the expression string into the column that it represents
    >>> df.select(expr("length(name)")).collect()
    [Row(length(name)=5), Row(length(name)=3)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.expr(str)
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
    """Creates a new struct column.
    :param cols: list of column names (string) or list of :class:`Column` expressions
    >>> df.select(struct('age', 'name').alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    >>> df.select(struct([df.age, df.name]).alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    """
    ctx = SparkContext._active_spark_context
    # Accept a single list/set of columns as well as varargs.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.struct(_to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def greatest(*cols):
    """
    Returns the greatest value of the list of column names, skipping null values.
    This function takes at least 2 parameters. It will return null iff all parameters are null.
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
    [Row(greatest=4)]
    """
    if len(cols) < 2:
        raise ValueError("greatest should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.greatest(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.5)
def least(*cols):
    """
    Returns the least value of the list of column names, skipping null values.
    This function takes at least 2 parameters. It will return null iff all parameters are null.
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
    [Row(least=1)]
    """
    if len(cols) < 2:
        raise ValueError("least should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.least(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.4)
def when(condition, value):
    """Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
    :param condition: a boolean :class:`Column` expression.
    :param value: a literal value, or a :class:`Column` expression.
    >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
    [Row(age=3), Row(age=4)]
    >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
    [Row(age=3), Row(age=None)]
    """
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    ctx = SparkContext._active_spark_context
    # Unwrap Column values to the underlying Java column; literals pass through.
    v = value._jc if isinstance(value, Column) else value
    return Column(ctx._jvm.functions.when(condition._jc, v))
@since(1.5)
def log(arg1, arg2=None):
    """Returns the first argument-based logarithm of the second argument.
    If there is only one argument, then this takes the natural logarithm of the argument.
    >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
    ['0.30102', '0.69897']
    >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
    ['0.69314', '1.60943']
    """
    ctx = SparkContext._active_spark_context
    if arg2 is None:
        # Single-argument form: natural logarithm of arg1.
        return Column(ctx._jvm.functions.log(_to_java_column(arg1)))
    # Two-argument form: arg1 is the base, arg2 the column.
    return Column(ctx._jvm.functions.log(arg1, _to_java_column(arg2)))
@since(1.5)
def log2(col):
    """Returns the base-2 logarithm of the argument.
    >>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
    [Row(log2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.log2(_to_java_column(col))
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
    """
    Convert a number in a string column from one base to another.
    >>> df = spark.createDataFrame([("010101",)], ['n'])
    >>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
    [Row(hex=u'15')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.conv(_to_java_column(col), fromBase, toBase)
    return Column(jc)
@since(1.5)
def factorial(col):
    """
    Computes the factorial of the given value.
    >>> df = spark.createDataFrame([(5,)], ['n'])
    >>> df.select(factorial(df.n).alias('f')).collect()
    [Row(f=120)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.factorial(_to_java_column(col))
    return Column(jc)
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
    """
    Window function: returns the value that is `offset` rows before the current row, and
    `default` if there is less than `offset` rows before the current row. For example,
    an `offset` of one will return the previous row at any given point in the window partition.
    This is equivalent to the LAG function in SQL.
    :param col: name of column or expression
    :param offset: number of row to extend
    :param default: default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lag(_to_java_column(col), offset, default)
    return Column(jc)
@since(1.4)
def lead(col, offset=1, default=None):
    """
    Window function: returns the value that is `offset` rows after the current row, and
    `default` if there is less than `offset` rows after the current row. For example,
    an `offset` of one will return the next row at any given point in the window partition.
    This is equivalent to the LEAD function in SQL.
    :param col: name of column or expression
    :param offset: number of row to extend
    :param default: default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lead(_to_java_column(col), offset, default)
    return Column(jc)
@since(1.4)
def ntile(n):
    """
    Window function: returns the ntile group id (from 1 to `n` inclusive)
    in an ordered window partition. For example, if `n` is 4, the first
    quarter of the rows will get value 1, the second quarter will get 2,
    the third quarter will get 3, and the last quarter will get 4.
    This is equivalent to the NTILE function in SQL.
    :param n: an integer
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.ntile(int(n))
    return Column(jc)
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
    """
    Returns the current date as a :class:`DateType` column.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.current_date()
    return Column(jc)
@since(1.5)
def current_timestamp():
    """
    Returns the current timestamp as a :class:`TimestampType` column.
    """
    # NOTE: the @since decorator was missing here, unlike every sibling
    # function in this module; upstream PySpark marks this as 1.5.
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
    """
    Converts a date/timestamp/string to a value of string in the format specified by the date
    format given by the second argument.
    A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
    pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Use when ever possible specialized functions like `year`. These benefit from a
        specialized implementation.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
    [Row(date=u'04/08/2015')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_format(_to_java_column(date), format)
    return Column(jc)
@since(1.5)
def year(col):
    """
    Extract the year of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(year('dt').alias('year')).collect()
    [Row(year=2015)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.year(_to_java_column(col))
    return Column(jc)
@since(1.5)
def quarter(col):
    """
    Extract the quarter of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(quarter('dt').alias('quarter')).collect()
    [Row(quarter=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.quarter(_to_java_column(col))
    return Column(jc)
@since(1.5)
def month(col):
    """
    Extract the month of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(month('dt').alias('month')).collect()
    [Row(month=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.month(_to_java_column(col))
    return Column(jc)
@since(2.3)
def dayofweek(col):
    """
    Extract the day of the week of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofweek('dt').alias('day')).collect()
    [Row(day=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofweek(_to_java_column(col))
    return Column(jc)
@since(1.5)
def dayofmonth(col):
    """
    Extract the day of the month of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofmonth('dt').alias('day')).collect()
    [Row(day=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofmonth(_to_java_column(col))
    return Column(jc)
@since(1.5)
def dayofyear(col):
    """
    Extract the day of the year of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofyear('dt').alias('day')).collect()
    [Row(day=98)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofyear(_to_java_column(col))
    return Column(jc)
@since(1.5)
def hour(col):
    """
    Extract the hours of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(hour('ts').alias('hour')).collect()
    [Row(hour=13)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.hour(_to_java_column(col))
    return Column(jc)
@since(1.5)
def minute(col):
    """
    Extract the minutes of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(minute('ts').alias('minute')).collect()
    [Row(minute=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.minute(_to_java_column(col))
    return Column(jc)
@since(1.5)
def second(col):
    """
    Extract the seconds of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(second('ts').alias('second')).collect()
    [Row(second=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.second(_to_java_column(col))
    return Column(jc)
@since(1.5)
def weekofyear(col):
    """
    Extract the week number of a given date as integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(weekofyear(df.dt).alias('week')).collect()
    [Row(week=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.weekofyear(_to_java_column(col))
    return Column(jc)
@since(1.5)
def date_add(start, days):
    """
    Returns the date that is `days` days after `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
    [Row(next_date=datetime.date(2015, 4, 9))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_add(_to_java_column(start), days)
    return Column(jc)
@since(1.5)
def date_sub(start, days):
    """
    Returns the date that is `days` days before `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
    [Row(prev_date=datetime.date(2015, 4, 7))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_sub(_to_java_column(start), days)
    return Column(jc)
@since(1.5)
def datediff(end, start):
    """
    Returns the number of days from `start` to `end`.
    >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
    >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
    [Row(diff=32)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))
    return Column(jc)
@since(1.5)
def add_months(start, months):
    """
    Returns the date that is `months` months after `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
    [Row(next_month=datetime.date(2015, 5, 8))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.add_months(_to_java_column(start), months)
    return Column(jc)
@since(1.5)
def months_between(date1, date2, roundOff=True):
    """
    Returns number of months between dates date1 and date2.
    If date1 is later than date2, then the result is positive.
    If date1 and date2 are on the same day of month, or both are the last day of month,
    returns an integer (time of day will be ignored).
    The result is rounded off to 8 digits unless `roundOff` is set to `False`.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
    >>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
    [Row(months=3.94959677)]
    >>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
    [Row(months=3.9495967741935485)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.months_between(
        _to_java_column(date1), _to_java_column(date2), roundOff)
    return Column(jc)
@since(2.2)
def to_date(col, format=None):
    """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
    :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
    using the optionally specified format. Specify formats according to
    `DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
    By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
    is omitted (equivalent to ``col.cast("date")``).
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    ctx = SparkContext._active_spark_context
    if format is None:
        # No pattern supplied: behaves like col.cast("date").
        return Column(ctx._jvm.functions.to_date(_to_java_column(col)))
    return Column(ctx._jvm.functions.to_date(_to_java_column(col), format))
@since(2.2)
def to_timestamp(col, format=None):
    """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
    :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.TimestampType`
    using the optionally specified format. Specify formats according to
    `DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
    By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
    is omitted (equivalent to ``col.cast("timestamp")``).
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t).alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
    """
    # Doc fix: the result type is TimestampType, not DateType as the original
    # docstring (copy-pasted from to_date) claimed; the doctests show datetime values.
    sc = SparkContext._active_spark_context
    if format is None:
        jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
    else:
        jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
    return Column(jc)
@since(1.5)
def trunc(date, format):
    """
    Returns date truncated to the unit specified by the format.
    :param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
    >>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
    >>> df.select(trunc(df.d, 'year').alias('year')).collect()
    [Row(year=datetime.date(1997, 1, 1))]
    >>> df.select(trunc(df.d, 'mon').alias('month')).collect()
    [Row(month=datetime.date(1997, 2, 1))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.trunc(_to_java_column(date), format)
    return Column(jc)
@since(2.3)
def date_trunc(format, timestamp):
    """
    Returns timestamp truncated to the unit specified by the format.
    :param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
        'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
    >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
    >>> df.select(date_trunc('year', df.t).alias('year')).collect()
    [Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
    >>> df.select(date_trunc('mon', df.t).alias('month')).collect()
    [Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_trunc(format, _to_java_column(timestamp))
    return Column(jc)
@since(1.5)
def next_day(date, dayOfWeek):
    """
    Returns the first date which is later than the value of the date column.
    Day of the week parameter is case insensitive, and accepts:
    "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
    >>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
    >>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
    [Row(date=datetime.date(2015, 8, 2))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.next_day(_to_java_column(date), dayOfWeek)
    return Column(jc)
@since(1.5)
def last_day(date):
    """
    Returns the last day of the month which the given date belongs to.
    >>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
    >>> df.select(last_day(df.d).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.last_day(_to_java_column(date))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="uuuu-MM-dd HH:mm:ss"):
    """
    Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
    representing the timestamp of that moment in the current system time zone in the given
    format.
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
    >>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
    [Row(ts=u'2015-04-08 00:00:00')]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.from_unixtime(_to_java_column(timestamp), format)
    return Column(jc)
@since(1.5)
def unix_timestamp(timestamp=None, format='uuuu-MM-dd HH:mm:ss'):
    """
    Convert time string with given pattern ('uuuu-MM-dd HH:mm:ss', by default)
    to Unix time stamp (in seconds), using the default timezone and the default
    locale, return null if fail.
    if `timestamp` is None, then it returns current timestamp.
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
    [Row(unix_time=1428476400)]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    if timestamp is None:
        # Zero-argument form: current timestamp in seconds.
        return Column(ctx._jvm.functions.unix_timestamp())
    jc = ctx._jvm.functions.unix_timestamp(_to_java_column(timestamp), format)
    return Column(jc)
@since(1.5)
def from_utc_timestamp(timestamp, tz):
    """
    This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
    takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
    renders that timestamp as a timestamp in the given time zone.
    However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not
    timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to
    the given timezone.
    This function may return confusing result if the input is a string with timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp
    according to the timezone in the string, and finally display the result by converting the
    timestamp to string according to the session local timezone.
    :param timestamp: the column that contains timestamps
    :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
    .. versionchanged:: 2.4
       `tz` can take a :class:`Column` containing timezone ID strings.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
    >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
    .. note:: Deprecated in 3.0. See SPARK-25496
    """
    warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
    ctx = SparkContext._active_spark_context
    # Since 2.4 the timezone may itself be a Column; unwrap it for the JVM call.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), jtz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
    """
    This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
    takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
    timezone, and renders that timestamp as a timestamp in UTC.
    However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not
    timezone-agnostic. So in Spark this function just shift the timestamp value from the given
    timezone to UTC timezone.
    This function may return confusing result if the input is a string with timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp
    according to the timezone in the string, and finally display the result by converting the
    timestamp to string according to the session local timezone.
    :param timestamp: the column that contains timestamps
    :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
    .. versionchanged:: 2.4
       `tz` can take a :class:`Column` containing timezone ID strings.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
    >>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
    .. note:: Deprecated in 3.0. See SPARK-25496
    """
    warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
    ctx = SparkContext._active_spark_context
    # Since 2.4 the timezone may itself be a Column; unwrap it for the JVM call.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), jtz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
    """Bucketize rows into one or more time windows given a timestamp specifying column. Window
    starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
    [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
    the order of months are not supported.
    The time column must be of :class:`pyspark.sql.types.TimestampType`.
    Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
    interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
    If the ``slideDuration`` is not provided, the windows will be tumbling windows.
    The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
    window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
    past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
    The output column will be a struct called 'window' by default with the nested columns 'start'
    and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
    >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    >>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
    >>> w.select(w.window.start.cast("string").alias("start"),
    ...          w.window.end.cast("string").alias("end"), "sum").collect()
    [Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
    """
    def _require_string(field, fieldName):
        # Durations/offsets must be non-empty interval strings.
        if not field or type(field) is not str:
            raise TypeError("%s should be provided as a string" % fieldName)
    ctx = SparkContext._active_spark_context
    time_col = _to_java_column(timeColumn)
    _require_string(windowDuration, "windowDuration")
    # Dispatch to the matching JVM overload depending on which optionals were given.
    if slideDuration and startTime:
        _require_string(slideDuration, "slideDuration")
        _require_string(startTime, "startTime")
        res = ctx._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
    elif slideDuration:
        _require_string(slideDuration, "slideDuration")
        res = ctx._jvm.functions.window(time_col, windowDuration, slideDuration)
    elif startTime:
        _require_string(startTime, "startTime")
        # Tumbling window with an offset: slide equals the window duration.
        res = ctx._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
    else:
        res = ctx._jvm.functions.window(time_col, windowDuration)
    return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
    """
    Calculates the cyclic redundancy check value (CRC32) of a binary column and
    returns the value as a bigint.
    >>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
    [Row(crc32=2743272264)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.crc32(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def md5(col):
    """Calculates the MD5 digest and returns the value as a 32 character hex string.
    >>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
    [Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.md5(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
    """Computes the SHA-1 digest of the column and returns it as a hex string.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
    [Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.sha1(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
    """Computes the SHA-2 family digest of the column and returns it as a hex string
    (SHA-224, SHA-256, SHA-384, and SHA-512). `numBits` selects the desired bit length
    of the result and must be one of 224, 256, 384, 512, or 0 (treated as 256).

    >>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
    >>> digests[0]
    Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
    >>> digests[1]
    Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.sha2(_to_java_column(col), numBits))
@since(2.0)
def hash(*cols):
    """Computes the hash code of the given columns, returned as an int column.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
    [Row(hash=-757602832)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.hash(_to_seq(ctx, cols, _to_java_column)))
@since(3.0)
def xxhash64(*cols):
    """Computes the hash code of the given columns with the 64-bit variant of the
    xxHash algorithm, returned as a long column.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
    [Row(hash=4105715581806190027)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.xxhash64(_to_seq(ctx, cols, _to_java_column)))
# ---------------------- String/Binary functions ------------------------------
# Mapping of simple single-column string functions to their docstrings.  Each
# entry below is materialized as a module-level wrapper function that forwards
# to the same-named function on the JVM side.
_string_functions = {
    'upper': 'Converts a string expression to upper case.',
    'lower': 'Converts a string expression to lower case.',
    'ascii': 'Computes the numeric value of the first character of the string column.',
    'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
    'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
    'ltrim': 'Trim the spaces from left end for the specified string value.',
    'rtrim': 'Trim the spaces from right end for the specified string value.',
    'trim': 'Trim the spaces from both ends for the specified string column.',
}
# Generate one module-level function per entry, each tagged as available since
# Spark 1.5 via the ``since`` decorator.
for _name, _doc in _string_functions.items():
    globals()[_name] = since(1.5)(_create_function_over_column(_name, _doc))
# Remove the loop variables so they do not leak into the module namespace.
del _name, _doc
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
    """
    Concatenates several input string columns into one string column, joining
    the values with the given separator.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd-123')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.concat_ws(sep, _to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.5)
def decode(col, charset):
    """
    Converts the first argument from binary to a string using the given character
    set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.decode(_to_java_column(col), charset)
    return Column(jc)
@since(1.5)
def encode(col, charset):
    """
    Converts the first argument from a string to binary using the given character
    set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.encode(_to_java_column(col), charset)
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
    """
    Formats the number X to a format like '#,--#,--#.--', rounded to `d` decimal
    places with HALF_EVEN round mode, returning the result as a string.

    :param col: the column name of the numeric value to be formatted
    :param d: the N decimal places

    >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
    [Row(v=u'5.0000')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.format_number(_to_java_column(col), d)
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
    """
    Formats the given arguments printf-style and returns the result as a string column.

    :param format: string that can contain embedded format tags and used as result column's value
    :param cols: list of column names (string) or list of :class:`Column` expressions to
        be used in formatting

    >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
    >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
    [Row(v=u'5 hello')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.format_string(format, _to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.5)
def instr(str, substr):
    """
    Locates the position of the first occurrence of `substr` in the given string
    column. Returns null if either argument is null.

    .. note:: The position is not zero based, but 1 based index. Returns 0 if substr
        could not be found in str.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(instr(df.s, 'b').alias('s')).collect()
    [Row(s=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.instr(_to_java_column(str), substr)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
    """
    Returns the substring of `str` that starts at `pos` with length `len` when
    `str` is of String type, or the slice of the byte array starting at `pos`
    (in bytes) with length `len` when `str` is of Binary type.

    .. note:: The position is not zero based, but 1 based index.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(substring(df.s, 1, 2).alias('s')).collect()
    [Row(s=u'ab')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.substring(_to_java_column(str), pos, len)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
    """
    Returns the substring of `str` before `count` occurrences of the delimiter
    `delim`. If `count` is positive, everything to the left of the final delimiter
    (counting from the left) is returned. If `count` is negative, everything to
    the right of the final delimiter (counting from the right) is returned.
    substring_index performs a case-sensitive match when searching for `delim`.

    >>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
    >>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
    [Row(s=u'a.b')]
    >>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
    [Row(s=u'b.c.d')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.substring_index(_to_java_column(str), delim, count)
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
    """Computes the Levenshtein distance between the two given string columns.

    >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
    >>> df0.select(levenshtein('l', 'r').alias('d')).collect()
    [Row(d=3)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)))
@since(1.5)
def locate(substr, str, pos=1):
    """
    Locates the position of the first occurrence of `substr` in a string column,
    after position `pos`.

    .. note:: The position is not zero based, but 1 based index. Returns 0 if substr
        could not be found in str.

    :param substr: a string
    :param str: a Column of :class:`pyspark.sql.types.StringType`
    :param pos: start position (zero based)

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(locate('b', df.s, 1).alias('s')).collect()
    [Row(s=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.locate(substr, _to_java_column(str), pos)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
    """
    Pads the string column on the left with `pad` up to a width of `len`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'##abcd')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lpad(_to_java_column(col), len, pad)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
    """
    Pads the string column on the right with `pad` up to a width of `len`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'abcd##')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.rpad(_to_java_column(col), len, pad)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
    """
    Returns a new string column with the input string column repeated `n` times.

    >>> df = spark.createDataFrame([('ab',)], ['s',])
    >>> df.select(repeat(df.s, 3).alias('s')).collect()
    [Row(s=u'ababab')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.repeat(_to_java_column(col), n)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
    """
    Splits `str` around matches of the given `pattern`.

    :param str: a string expression to split
    :param pattern: a string representing a regular expression. The regex string should be
        a Java regular expression.
    :param limit: an integer which controls the number of times `pattern` is applied.

        * ``limit > 0``: The resulting array's length will not be more than `limit`, and the
                         resulting array's last entry will contain all input beyond the last
                         matched pattern.
        * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
                          array can be of any size.

    .. versionchanged:: 3.0
       `split` now takes an optional `limit` field. If not provided, default limit value is -1.

    >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
    >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
    [Row(s=[u'one', u'twoBthreeC'])]
    >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
    [Row(s=[u'one', u'two', u'three', u''])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.split(_to_java_column(str), pattern, limit)
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
    r"""Extracts a specific group matched by a Java regex from the specified string column.
    If the regex did not match, or the specified group did not match, an empty string is returned.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
    [Row(d=u'100')]
    >>> df = spark.createDataFrame([('foo',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
    [Row(d=u'')]
    >>> df = spark.createDataFrame([('aaaac',)], ['str'])
    >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
    [Row(d=u'')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx))
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
    r"""Replaces every substring of the string column that matches the regex `pattern`
    with `replacement`.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d=u'-----')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement))
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
    """Upper-cases the first letter of each word in the sentence.

    >>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
    [Row(v=u'Ab Cd')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.initcap(_to_java_column(col))
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
    """
    Computes the SoundEx encoding of a string column.

    >>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
    >>> df.select(soundex(df.name).alias("soundex")).collect()
    [Row(soundex=u'P362'), Row(soundex=u'U612')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.soundex(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def bin(col):
    """Returns the binary value of the given column as a string representation.

    >>> df.select(bin(df.age).alias('c')).collect()
    [Row(c=u'10'), Row(c=u'101')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.bin(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def hex(col):
    """Computes the hex value of the given column, which may be of
    :class:`pyspark.sql.types.StringType`, :class:`pyspark.sql.types.BinaryType`,
    :class:`pyspark.sql.types.IntegerType` or :class:`pyspark.sql.types.LongType`.

    >>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
    [Row(hex(a)=u'414243', hex(b)=u'3')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.hex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
    """Inverse of hex. Treats each pair of characters as a hexadecimal number
    and converts it to the byte representation of that number.

    >>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
    [Row(unhex(a)=bytearray(b'ABC'))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.unhex(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def length(col):
    """Computes the character length of string data or the number of bytes of
    binary data. The character length includes trailing spaces; the binary
    length includes binary zeros.

    >>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
    [Row(length=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.length(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
    """Translates any character in `srcCol` that appears in `matching` to the
    corresponding character in `replace`. The translation happens whenever a
    character in the string matches a character in `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.translate(_to_java_column(srcCol), matching, replace)
    return Column(jc)
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
    """Creates a new map column.

    :param cols: list of column names (string) or list of :class:`Column` expressions that are
        grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).

    >>> df.select(create_map('name', 'age').alias("map")).collect()
    [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
    >>> df.select(create_map([df.name, df.age]).alias("map")).collect()
    [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full column sequence.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.map(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def map_from_arrays(col1, col2):
    """Builds a new map column from a keys array and a values array.

    :param col1: name of column containing a set of keys. All elements should not be null
    :param col2: name of column containing a set of values

    >>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
    >>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
    +----------------+
    |             map|
    +----------------+
    |[2 -> a, 5 -> b]|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(1.4)
def array(*cols):
    """Creates a new array column.

    :param cols: list of column names (string) or list of :class:`Column` expressions that have
        the same data type.

    >>> df.select(array('age', 'age').alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    >>> df.select(array([df.age, df.age]).alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full column sequence.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.array(_to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def array_contains(col, value):
    """
    Collection function: returns null if the array is null, true if the array
    contains the given value, and false otherwise.

    :param col: name of column containing array
    :param value: value to check for in array

    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(array_contains(df.data, "a")).collect()
    [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_contains(_to_java_column(col), value)
    return Column(jc)
@since(2.4)
def arrays_overlap(a1, a2):
    """
    Collection function: returns true if the arrays contain any common non-null element; if not,
    returns null if both the arrays are non-empty and any of them contains a null element; returns
    false otherwise.

    >>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
    >>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
    [Row(overlap=True), Row(overlap=False)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2))
    return Column(jc)
@since(2.4)
def slice(x, start, length):
    """
    Collection function: returns an array with all the elements of `x` from index `start`
    (array indices start at 1, or from the end if `start` is negative) with the given `length`.

    >>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
    >>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
    [Row(sliced=[2, 3]), Row(sliced=[5])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.slice(_to_java_column(x), start, length)
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
    """
    Joins the elements of `column` into a string using the `delimiter`. Null values
    are replaced with `null_replacement` if set, otherwise they are skipped.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
    """
    ctx = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    # Only forward null_replacement when the caller provided one.
    if null_replacement is None:
        jc = ctx._jvm.functions.array_join(jcol, delimiter)
    else:
        jc = ctx._jvm.functions.array_join(jcol, delimiter, null_replacement)
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
    """
    Concatenates several input columns into one column. Works with strings,
    binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]
    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.concat(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(2.4)
def array_position(col, value):
    """
    Collection function: locates the position of the first occurrence of the given
    value in the given array. Returns null if either of the arguments are null.

    .. note:: The position is not zero based, but 1 based index. Returns 0 if the given
        value could not be found in the array.

    >>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
    >>> df.select(array_position(df.data, "a")).collect()
    [Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_position(_to_java_column(col), value)
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
    """
    Collection function: returns the element of the array at the given index in
    `extraction` if `col` is an array, or the value for the given key in
    `extraction` if `col` is a map.

    :param col: name of column containing array or map
    :param extraction: index to check for in array or key to check for in map

    .. note:: The position is not zero based, but 1 based index.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(element_at(df.data, 1)).collect()
    [Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
    >>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
    >>> df.select(element_at(df.data, "a")).collect()
    [Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.element_at(_to_java_column(col), extraction)
    return Column(jc)
@since(2.4)
def array_remove(col, element):
    """
    Collection function: removes all elements equal to `element` from the given array.

    :param col: name of column containing array
    :param element: element to be removed from the array

    >>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
    >>> df.select(array_remove(df.data, 1)).collect()
    [Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_remove(_to_java_column(col), element)
    return Column(jc)
@since(2.4)
def array_distinct(col):
    """
    Collection function: removes duplicate values from the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
    >>> df.select(array_distinct(df.data)).collect()
    [Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_distinct(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
    """
    Collection function: returns an array with the elements present in both
    `col1` and `col2`, without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_intersect(df.c1, df.c2)).collect()
    [Row(array_intersect(c1, c2)=[u'a', u'c'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
    """
    Collection function: returns an array with the elements present in either
    `col1` or `col2`, without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_union(df.c1, df.c2)).collect()
    [Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
    """
    Collection function: returns an array with the elements present in `col1`
    but not in `col2`, without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_except(df.c1, df.c2)).collect()
    [Row(array_except(c1, c2)=[u'b'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(1.4)
def explode(col):
    """
    Produces a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.explode(_to_java_column(col)))
@since(2.1)
def posexplode(col):
    """
    Produces a new row for each element with position in the given array or map.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(posexplode(eDF.intlist)).collect()
    [Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]

    >>> eDF.select(posexplode(eDF.mapfield)).show()
    +---+---+-----+
    |pos|key|value|
    +---+---+-----+
    |  0|  a|    b|
    +---+---+-----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.posexplode(_to_java_column(col)))
@since(2.3)
def explode_outer(col):
    """
    Produces a new row for each element in the given array or map.
    Unlike explode, if the array/map is null or empty then null is produced.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", explode_outer("a_map")).show()
    +---+----------+----+-----+
    | id|  an_array| key|value|
    +---+----------+----+-----+
    |  1|[foo, bar]|   x|  1.0|
    |  2|        []|null| null|
    |  3|      null|null| null|
    +---+----------+----+-----+

    >>> df.select("id", "a_map", explode_outer("an_array")).show()
    +---+----------+----+
    | id|     a_map| col|
    +---+----------+----+
    |  1|[x -> 1.0]| foo|
    |  1|[x -> 1.0]| bar|
    |  2|        []|null|
    |  3|      null|null|
    +---+----------+----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.explode_outer(_to_java_column(col)))
@since(2.3)
def posexplode_outer(col):
    """
    Produces a new row for each element with position in the given array or map.
    Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.

    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", posexplode_outer("a_map")).show()
    +---+----------+----+----+-----+
    | id|  an_array| pos| key|value|
    +---+----------+----+----+-----+
    |  1|[foo, bar]|   0|   x|  1.0|
    |  2|        []|null|null| null|
    |  3|      null|null|null| null|
    +---+----------+----+----+-----+
    >>> df.select("id", "a_map", posexplode_outer("an_array")).show()
    +---+----------+----+----+
    | id|     a_map| pos| col|
    +---+----------+----+----+
    |  1|[x -> 1.0]|   0| foo|
    |  1|[x -> 1.0]|   1| bar|
    |  2|        []|null|null|
    |  3|      null|null|null|
    +---+----------+----+----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.posexplode_outer(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
    """
    Extracts a json object from a json string based on the json path specified,
    and returns the json string of the extracted object. Returns null if the
    input json string is invalid.

    :param col: string column in json format
    :param path: path to the json object to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.get_json_object(_to_java_column(col), path))
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.json_tuple(_to_java_column(col), _to_seq(ctx, fields)))
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options=None):
    """
    Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
    as keys type, :class:`StructType` or :class:`ArrayType` with
    the specified schema. Returns `null`, in the case of an unparseable string.

    :param col: string column in json format
    :param schema: a StructType or ArrayType of StructType to use when parsing the json column.
    :param options: options to control parsing. accepts the same options as the json datasource.
        ``None`` (the default) is equivalent to passing no options.

    .. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
        supported for ``schema``.

    >>> from pyspark.sql.types import *
    >>> data = [(1, '''{"a": 1}''')]
    >>> schema = StructType([StructField("a", IntegerType())])
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "a INT").alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
    [Row(json={u'a': 1})]
    >>> data = [(1, '''[{"a": 1}]''')]
    >>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[Row(a=1)])]
    >>> schema = schema_of_json(lit('''{"a": 0}'''))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=None))]
    >>> data = [(1, '''[1, 2, 3]''')]
    >>> schema = ArrayType(IntegerType())
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[1, 2, 3])]
    """
    sc = SparkContext._active_spark_context
    # Use None as the default instead of a mutable ``{}`` default argument,
    # which would be shared across all calls of this function.
    if options is None:
        options = {}
    if isinstance(schema, DataType):
        # Serialize the Python DataType into its JSON representation for the JVM.
        schema = schema.json()
    elif isinstance(schema, Column):
        schema = _to_java_column(schema)
    jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options=None):
    """
    Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
    into a JSON string. Throws an exception, in the case of an unsupported type.

    :param col: name of column containing a struct, an array or a map.
    :param options: options to control converting. accepts the same options as the JSON datasource.
        Additionally the function supports the `pretty` option which enables
        pretty JSON generation. ``None`` (the default) is equivalent to passing
        no options.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.types import *
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'{"age":2,"name":"Alice"}')]
    >>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
    >>> data = [(1, {"name": "Alice"})]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'{"name":"Alice"}')]
    >>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
    >>> data = [(1, ["Alice", "Bob"])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'["Alice","Bob"]')]
    """
    sc = SparkContext._active_spark_context
    # Use None as the default instead of a mutable ``{}`` default argument,
    # which would be shared across all calls of this function.
    if options is None:
        options = {}
    jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
    """
    Parses a JSON string and infers its schema in DDL format.

    :param json: a JSON string or a string literal containing a JSON string.
    :param options: options to control parsing. accepts the same options as the JSON datasource
    :raises TypeError: if ``json`` is neither a string nor a :class:`Column`.

    .. versionchanged:: 3.0
       It accepts `options` parameter to control schema inferring.

    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    """
    if isinstance(json, basestring):
        # Plain strings are wrapped in a literal column before hitting the JVM.
        col = _create_column_from_literal(json)
    elif isinstance(json, Column):
        col = _to_java_column(json)
    else:
        # Fix: the message used to say "schema argument", but the parameter being
        # validated here is ``json`` — there is no ``schema`` parameter.
        raise TypeError("json argument should be a column or string")
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
    """
    Parses a CSV string and infers its schema in DDL format.

    :param csv: a CSV string or a string literal containing a CSV string.
    :param options: options to control parsing. accepts the same options as the CSV datasource
    :raises TypeError: if ``csv`` is neither a string nor a :class:`Column`.

    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    """
    # Fix: the docstring documented a non-existent ``col`` parameter; the parameter
    # is ``csv``.
    if isinstance(csv, basestring):
        # Plain strings are wrapped in a literal column before hitting the JVM.
        col = _create_column_from_literal(csv)
    elif isinstance(csv, Column):
        col = _to_java_column(csv)
    else:
        # Fix: the message used to say "schema argument", but the parameter being
        # validated here is ``csv``.
        raise TypeError("csv argument should be a column or string")
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
    """
    Converts a column containing a :class:`StructType` into a CSV string.
    Throws an exception, in the case of an unsupported type.

    :param col: name of column containing a struct.
    :param options: options to control converting. accepts the same options as the CSV datasource.

    >>> from pyspark.sql import Row
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_csv(df.value).alias("csv")).collect()
    [Row(csv=u'2,Alice')]
    """
    # Delegate straight to the JVM-side to_csv; options are stringified for py4j.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_functions.to_csv(_to_java_column(col), _options_to_str(options)))
@since(1.5)
def size(col):
    """
    Collection function: returns the length of the array or map stored in the column.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
    >>> df.select(size(df.data)).collect()
    [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
    """
    jc = SparkContext._active_spark_context._jvm.functions.size(_to_java_column(col))
    return Column(jc)
@since(2.4)
def array_min(col):
    """
    Collection function: returns the minimum value of the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_min(df.data).alias('min')).collect()
    [Row(min=1), Row(min=-1)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
    """
    Collection function: returns the maximum value of the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_max(df.data).alias('max')).collect()
    [Row(max=3), Row(max=10)]
    """
    jc = SparkContext._active_spark_context._jvm.functions.array_max(_to_java_column(col))
    return Column(jc)
@since(1.5)
def sort_array(col, asc=True):
    """
    Collection function: sorts the input array in ascending or descending order according
    to the natural ordering of the array elements. Null elements will be placed at the beginning
    of the returned array in ascending order or at the end of the returned array in descending
    order.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(sort_array(df.data).alias('r')).collect()
    [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
    >>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
    [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.sort_array(_to_java_column(col), asc)
    return Column(jc)
@since(2.4)
def array_sort(col):
    """
    Collection function: sorts the input array in ascending order. The elements of the input array
    must be orderable. Null elements will be placed at the end of the returned array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(array_sort(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
    """
    Collection function: Generates a random permutation of the given array.

    .. note:: The function is non-deterministic.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
    >>> df.select(shuffle(df.data).alias('s')).collect()  # doctest: +SKIP
    [Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
    """
    jc = SparkContext._active_spark_context._jvm.functions.shuffle(_to_java_column(col))
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
    """
    Collection function: returns a reversed string or an array with reverse order of elements.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
    >>> df.select(reverse(df.data).alias('s')).collect()
    [Row(s=u'LQS krapS')]
    >>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
    >>> df.select(reverse(df.data).alias('r')).collect()
    [Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
    """
    Collection function: creates a single array from an array of arrays.
    If a structure of nested arrays is deeper than two levels,
    only one level of nesting is removed.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
    >>> df.select(flatten(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
    """
    jc = SparkContext._active_spark_context._jvm.functions.flatten(_to_java_column(col))
    return Column(jc)
@since(2.3)
def map_keys(col):
    """
    Collection function: Returns an unordered array containing the keys of the map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_keys
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_keys("data").alias("keys")).show()
    +------+
    |  keys|
    +------+
    |[1, 2]|
    +------+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
    """
    Collection function: Returns an unordered array containing the values of the map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_values
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_values("data").alias("values")).show()
    +------+
    |values|
    +------+
    |[a, b]|
    +------+
    """
    jc = SparkContext._active_spark_context._jvm.functions.map_values(_to_java_column(col))
    return Column(jc)
@since(3.0)
def map_entries(col):
    """
    Collection function: Returns an unordered array of all entries in the given map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_entries
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_entries("data").alias("entries")).show()
    +----------------+
    |         entries|
    +----------------+
    |[[1, a], [2, b]]|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
    """
    Collection function: Returns a map created from the given array of entries.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_from_entries
    >>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
    >>> df.select(map_from_entries("data").alias("map")).show()
    +----------------+
    |             map|
    +----------------+
    |[1 -> a, 2 -> b]|
    +----------------+
    """
    jc = SparkContext._active_spark_context._jvm.functions.map_from_entries(
        _to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
    """
    Collection function: creates an array containing a column repeated count times.

    >>> df = spark.createDataFrame([('ab',)], ['data'])
    >>> df.select(array_repeat(df.data, 3).alias('r')).collect()
    [Row(r=[u'ab', u'ab', u'ab'])]
    """
    # ``count`` may be either a Column expression or a plain int literal; only the
    # former needs conversion to its JVM counterpart.
    jcount = _to_java_column(count) if isinstance(count, Column) else count
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.array_repeat(_to_java_column(col), jcount))
@since(2.4)
def arrays_zip(*cols):
    """
    Collection function: Returns a merged array of structs in which the N-th struct contains all
    N-th values of input arrays.

    :param cols: columns of arrays to be merged.

    >>> from pyspark.sql.functions import arrays_zip
    >>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
    >>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
    [Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.arrays_zip(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
    """Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full collection of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        (cols,) = cols
    return Column(ctx._jvm.functions.map_concat(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def sequence(start, stop, step=None):
    """
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
    otherwise -1.

    >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
    >>> df1.select(sequence('C1', 'C2').alias('r')).collect()
    [Row(r=[-2, -1, 0, 1, 2])]
    >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
    >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
    [Row(r=[4, 2, 0, -2, -4])]
    """
    ctx = SparkContext._active_spark_context
    # Pick the 2-arg or 3-arg JVM overload depending on whether a step was given.
    args = (start, stop) if step is None else (start, stop, step)
    return Column(ctx._jvm.functions.sequence(*[_to_java_column(a) for a in args]))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
    """
    Parses a column containing a CSV string to a row with the specified schema.
    Returns `null`, in the case of an unparseable string.

    :param col: string column in CSV format
    :param schema: a string with schema in DDL format to use when parsing the CSV column.
    :param options: options to control parsing. accepts the same options as the CSV datasource

    >>> data = [("1,2,3",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
    [Row(csv=Row(a=1, b=2, c=3))]
    >>> value = data[0][0]
    >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
    [Row(csv=Row(_c0=1, _c1=2, _c2=3))]
    >>> data = [("   abc",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> options = {'ignoreLeadingWhiteSpace': True}
    >>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
    [Row(csv=Row(s=u'abc'))]
    """
    # The schema may be given either as a DDL string (wrapped in a literal column)
    # or as a Column expression (e.g. the output of schema_of_csv).
    if isinstance(schema, basestring):
        jschema = _create_column_from_literal(schema)
    elif isinstance(schema, Column):
        jschema = _to_java_column(schema)
    else:
        raise TypeError("schema argument should be a column or string")
    ctx = SparkContext._active_spark_context
    return Column(
        ctx._jvm.functions.from_csv(_to_java_column(col), jschema, _options_to_str(options)))
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
    """Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
    """
    # One or more pandas.Series -> a pandas.Series of the same length.
    SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
    # Iterator of input batches -> iterator of output batches (scalar semantics).
    SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
    # pandas.DataFrame -> pandas.DataFrame; used with GroupedData.apply.
    GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
    # Cogrouped variant of GROUPED_MAP — semantics not documented in this chunk;
    # presumably two grouped DataFrames -> one DataFrame (verify against cogroup API).
    COGROUPED_MAP = PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF
    # One or more pandas.Series -> a single scalar; used with GroupedData.agg / Window.
    GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
    # Iterator of pandas.DataFrames -> iterator of pandas.DataFrames; DataFrame.mapInPandas.
    MAP_ITER = PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
    """Creates a user defined function (UDF).

    .. note:: The user-defined functions are considered deterministic by default. Due to
        optimization, duplicate invocations may be eliminated or the function may even be invoked
        more times than it is present in the query. If your function is not deterministic, call
        `asNondeterministic` on the user defined function. E.g.:

    >>> from pyspark.sql.types import IntegerType
    >>> import random
    >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()

    .. note:: The user-defined functions do not support conditional expressions or short circuiting
        in boolean expressions and it ends up with being executed all internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.

    .. note:: The user-defined functions do not take keyword arguments on the calling side.

    :param f: python function if used as a standalone function
    :param returnType: the return type of the user-defined function. The value can be either a
        :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.

    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> @udf
    ... def to_upper(s):
    ...     if s is not None:
    ...         return s.upper()
    ...
    >>> @udf(returnType=IntegerType())
    ... def add_one(x):
    ...     if x is not None:
    ...         return x + 1
    ...
    >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
    >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
    +----------+--------------+------------+
    |slen(name)|to_upper(name)|add_one(age)|
    +----------+--------------+------------+
    |         8|      JOHN DOE|          22|
    +----------+--------------+------------+
    """
    # The following table shows most of Python data and SQL type conversions in normal UDFs that
    # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
    # future. The table might have to be eventually documented externally.
    # Please see SPARK-28131's PR to see the codes in order to generate the table below.
    #
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    # |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|         a(str)|    1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)|         (1,)(tuple)|bytearray(b'ABC')(bytearray)|  1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)|  # noqa
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    # |                      boolean|          None|      True|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                      tinyint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                     smallint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                          int|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       bigint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       string|          None|    'true'|   '1'|            'a'|'java.util.Gregor...|         'java.util.Gregor...|     '1.0'|         '[I@66cbb73a'|    '[1]'|'[Ljava.lang.Obje...|               '[B@5a51eb1a'|         '1'|       '{a=1}'|                 X|                     X|  # noqa
    # |                         date|          None|         X|     X|              X|datetime.date(197...|         datetime.date(197...|         X|                     X|        X|                   X|                           X|           X|             X|                 X|                     X|  # noqa
    # |                    timestamp|          None|         X|     X|              X|                   X|         datetime.datetime...|         X|                     X|        X|                   X|                           X|           X|             X|                 X|                     X|  # noqa
    # |                        float|          None|      None|  None|           None|                None|                         None|       1.0|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       double|          None|      None|  None|           None|                None|                         None|       1.0|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                   array<int>|          None|      None|  None|           None|                None|                         None|      None|                   [1]|      [1]|                 [1]|                [65, 66, 67]|        None|          None|                 X|                     X|  # noqa
    # |                       binary|          None|      None|  None|bytearray(b'a')|                None|                         None|      None|                  None|     None|                None|           bytearray(b'ABC')|        None|          None|                 X|                     X|  # noqa
    # |                decimal(10,0)|          None|      None|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|Decimal('1')|          None|                 X|                     X|  # noqa
    # |              map<string,int>|          None|      None|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|      {'a': 1}|                 X|                     X|  # noqa
    # |               struct<_1:int>|          None|         X|     X|              X|                   X|                            X|         X|                     X|Row(_1=1)|           Row(_1=1)|                           X|           X|  Row(_1=None)|         Row(_1=1)|             Row(_1=1)|  # noqa
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    #
    # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
    # used in `returnType`.
    # Note: The values inside of the table are generated by `repr`.
    # Note: 'X' means it throws an exception during the conversion.
    # Note: Python 3.7.3 is used.

    # decorator @udf, @udf(), @udf(dataType())
    # When used as a parameterized decorator (``@udf()`` / ``@udf(IntegerType())``),
    # ``f`` is either None or the return type, so return a partial that will later
    # receive the decorated function. Otherwise ``f`` is the function itself and the
    # UDF is created immediately.
    if f is None or isinstance(f, (str, DataType)):
        # If DataType has been passed as a positional argument
        # for decorator use it as a returnType
        return_type = f or returnType
        return functools.partial(_create_udf, returnType=return_type,
                                 evalType=PythonEvalType.SQL_BATCHED_UDF)
    else:
        return _create_udf(f=f, returnType=returnType,
                           evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be of the same as the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs can be used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, this can be used, for example, to ensure the length of each returned
`pandas.Series`, and can not be used as the column length.
2. SCALAR_ITER
A scalar iterator UDF is semantically the same as the scalar Pandas UDF above except that the
wrapped Python function takes an iterator of batches as input instead of a single batch and,
instead of returning a single output batch, it yields output batches or explicitly returns an
generator or an iterator of output batches.
It is useful when the UDF execution requires initializing some state, e.g., loading a machine
learning model file to apply inference to every input batch.
.. note:: It is not guaranteed that one invocation of a scalar iterator UDF will process all
batches from one partition, although it is currently implemented this way.
Your code shall not rely on this behavior because it might change in the future for
further optimization, e.g., one invocation processes multiple partitions.
Scalar iterator UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
>>> pdf = pd.DataFrame([1, 2, 3], columns=["x"]) # doctest: +SKIP
>>> df = spark.createDataFrame(pdf) # doctest: +SKIP
When the UDF is called with a single column that is not `StructType`, the input to the
underlying function is an iterator of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_one(batch_iter):
... for x in batch_iter:
... yield x + 1
...
>>> df.select(plus_one(col("x"))).show() # doctest: +SKIP
+-----------+
|plus_one(x)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
When the UDF is called with more than one columns, the input to the underlying function is an
iterator of `pd.Series` tuple.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_cols(batch_iter):
... for a, b in batch_iter:
... yield a * b
...
>>> df.select(multiply_two_cols(col("x"), col("x"))).show() # doctest: +SKIP
+-----------------------+
|multiply_two_cols(x, x)|
+-----------------------+
| 1|
| 4|
| 9|
+-----------------------+
When the UDF is called with a single column that is `StructType`, the input to the underlying
function is an iterator of `pd.DataFrame`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_nested_cols(pdf_iter):
... for pdf in pdf_iter:
... yield pdf["a"] * pdf["b"]
...
>>> df.select(
... multiply_two_nested_cols(
... struct(col("x").alias("a"), col("x").alias("b"))
... ).alias("y")
... ).show() # doctest: +SKIP
+---+
| y|
+---+
| 1|
| 4|
| 9|
+---+
In the UDF, you can initialize some states before processing batches, wrap your code with
`try ... finally ...` or use context managers to ensure the release of resources at the end
or in case of early termination.
>>> y_bc = spark.sparkContext.broadcast(1) # doctest: +SKIP
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_y(batch_iter):
... y = y_bc.value # initialize some state
... try:
... for x in batch_iter:
... yield x + y
... finally:
... pass # release resources here, if any
...
>>> df.select(plus_y(col("x"))).show() # doctest: +SKIP
+---------+
|plus_y(x)|
+---------+
| 2|
| 3|
| 4|
+---------+
3. GROUPED_MAP
A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
>>> def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
4. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregated UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregated UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
5. MAP_ITER
A map iterator Pandas UDFs are used to transform data with an iterator of batches.
It can be used with :meth:`pyspark.sql.DataFrame.mapInPandas`.
It can return the output of arbitrary length in contrast to the scalar Pandas UDF.
It maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined
function and returns the result as a :class:`DataFrame`.
The user-defined function should take an iterator of `pandas.DataFrame`\\s and return another
iterator of `pandas.DataFrame`\\s. All columns are passed together as an
iterator of `pandas.DataFrame`\\s to the user-defined function and the returned iterator of
`pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
>>> df = spark.createDataFrame([(1, 21), (2, 30)],
... ("id", "age")) # doctest: +SKIP
>>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP
... def filter_func(batch_iter):
... for pdf in batch_iter:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions and it ends up with being executed all internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| ''| ''| ''| '\x01'| '\x01'| ''| ''| '\x01'| '\x01'| ''| ''| ''| X| X| 'a'| X| X| ''| X| ''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')| X| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 0.24.2 and PyArrow 0.13.0 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
# Helper names that are imported into this module's namespace but must not
# be re-exported as part of the public ``pyspark.sql.functions`` API.
blacklist = ['map', 'since', 'ignore_unicode_prefix']
# Auto-build the public API: every lower-cased, non-underscore callable
# defined above, minus the blacklisted helpers.  ``PandasUDFType`` is a
# class-style constant holder, so it is appended explicitly, and the list
# is sorted for stable, reproducible documentation output.
__all__ = [k for k, v in globals().items()
           if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
    """Run the doctests embedded in ``pyspark.sql.functions``.

    Spins up a local Spark session, executes every doctest in the module
    against it, and exits the process with a non-zero status if any of
    them fail.
    """
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions

    session = (SparkSession.builder
               .master("local[4]")
               .appName("sql.functions tests")
               .getOrCreate())

    # Doctests see the module namespace plus a live session, its context,
    # and a small example DataFrame.
    globs = dict(pyspark.sql.functions.__dict__)
    globs['sc'] = session.sparkContext
    globs['spark'] = session
    globs['df'] = session.createDataFrame(
        [Row(name='Alice', age=2), Row(name='Bob', age=5)])

    # Some doctests exercise the deprecated UTC timestamp functions, which
    # are gated behind this legacy flag.
    session.conf.set("spark.sql.legacy.utcTimestampFunc.enabled", "true")
    failure_count, test_count = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    session.conf.unset("spark.sql.legacy.utcTimestampFunc.enabled")
    session.stop()

    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
# === sklearn/svm/classes.py (License: BSD 3 clause) ===
import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
                SparseCoefMixin):
    """Linear Support Vector Classification.

    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to ``coef_``
        vectors that are sparse.

    loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
        Specifies the loss function. 'hinge' is the standard SVM loss
        (used e.g. by the SVC class) while 'squared_hinge' is the
        square of the hinge loss.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    C : float, optional (default=1.0)
        Penalty parameter C of the error term. Must be strictly positive.

    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        ``"ovr"`` trains n_classes one-vs-rest classifiers, while
        ``"crammer_singer"`` optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice as it rarely leads
        to better accuracy and is more expensive to compute.
        If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
        will be ignored.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        ``[x, self.intercept_scaling]``,
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to ``class_weight[i]*C`` for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data for the dual coordinate descent (if ``dual=True``). When
        ``dual=False`` the underlying implementation of :class:`LinearSVC`
        is not random and ``random_state`` has no effect on the results. If
        int, random_state is the seed used by the random number generator; If
        RandomState instance, random_state is the random number generator; If
        None, the random number generator is the RandomState instance used by
        `np.random`.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        ``coef_`` is a readonly property derived from ``raw_coef_`` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = LinearSVC(random_state=0)
    >>> clf.fit(X, y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
         verbose=0)
    >>> print(clf.coef_)
    [[ 0.08551385  0.39414796  0.49847831  0.37513797]]
    >>> print(clf.intercept_)
    [ 0.28418066]
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller ``tol`` parameter.

    The underlying implementation, liblinear, uses a sparse internal
    representation for the data that will incur a memory copy.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    References
    ----------
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__

    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.

    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
                 C=1.0, multi_class='ovr', fit_intercept=True,
                 intercept_scaling=1, class_weight=None, verbose=0,
                 random_state=None, max_iter=1000):
        # sklearn convention: __init__ only stores hyper-parameters,
        # validation happens in fit.
        self.dual = dual
        self.tol = tol
        self.C = C
        self.multi_class = multi_class
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.penalty = penalty
        self.loss = loss

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        sample_weight : array-like, shape = [n_samples], optional
            Array of weights that are assigned to individual
            samples. If not provided,
            then each sample is given unit weight.

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # Map the deprecated 'l1'/'l2' loss aliases onto the canonical
        # names, warning the user once per fit.
        if self.loss in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(self.loss)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # C is the inverse regularization strength and must be strictly
        # positive; C == 0 would be rejected by liblinear anyway, so fail
        # fast here with a clear message (was previously only `C < 0`).
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        check_classification_targets(y)
        self.classes_ = np.unique(y)

        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            self.class_weight, self.penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, self.multi_class,
            self.loss, sample_weight=sample_weight)

        # In the binary crammer_singer case liblinear returns one row per
        # class; collapse them into the usual single-row representation so
        # that decision_function has the standard binary shape.
        if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
            self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
            if self.fit_intercept:
                intercept = self.intercept_[1] - self.intercept_[0]
                self.intercept_ = np.array([intercept])

        return self
class LinearSVR(LinearModel, RegressorMixin):
    """Linear Support Vector Regression.

    Similar to SVR with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better to large numbers of
    samples.

    This class supports both dense and sparse input.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term. The penalty is a squared
        l2 penalty. The bigger this parameter, the less regularization is
        used. Must be strictly positive.

    loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive
        loss.

    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set ``epsilon=0``.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (i.e. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        When self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    verbose : int, (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    max_iter : int, (default=1000)
        The maximum number of iterations to be run.

    Attributes
    ----------
    coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.

    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import LinearSVR
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> regr = LinearSVR(random_state=0)
    >>> regr.fit(X, y)
    LinearSVR(C=1.0, dual=True, epsilon=0.0, fit_intercept=True,
         intercept_scaling=1.0, loss='epsilon_insensitive', max_iter=1000,
         random_state=0, tol=0.0001, verbose=0)
    >>> print(regr.coef_)
    [ 16.35750999  26.91499923  42.30652207  60.47843124]
    >>> print(regr.intercept_)
    [-4.29756543]
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-4.29756543]

    See also
    --------
    LinearSVC
        Implementation of Support Vector Machine classifier using the
        same library as this class (liblinear).

    SVR
        Implementation of Support Vector Machine regression using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.

    sklearn.linear_model.SGDRegressor
        SGDRegressor can optimize the same cost function as LinearSVR
        by adjusting the penalty and loss parameters. In addition it requires
        less memory, allows incremental (online) learning, and implements
        various loss functions and regularization regimes.
    """

    def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
                 loss='epsilon_insensitive', fit_intercept=True,
                 intercept_scaling=1., dual=True, verbose=0,
                 random_state=None, max_iter=1000):
        # sklearn convention: __init__ only stores hyper-parameters,
        # validation happens in fit.
        self.tol = tol
        self.C = C
        self.epsilon = epsilon
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.verbose = verbose
        self.random_state = random_state
        self.max_iter = max_iter
        self.dual = dual
        self.loss = loss

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target vector relative to X

        sample_weight : array-like, shape = [n_samples], optional
            Array of weights that are assigned to individual
            samples. If not provided,
            then each sample is given unit weight.

        Returns
        -------
        self : object
            Returns self.
        """
        # FIXME Remove l1/l2 support in 1.0 -----------------------------------
        msg = ("loss='%s' has been deprecated in favor of "
               "loss='%s' as of 0.16. Backward compatibility"
               " for the loss='%s' will be removed in %s")

        # Map the deprecated 'l1'/'l2' loss aliases onto the canonical
        # names, warning the user once per fit.
        if self.loss in ('l1', 'l2'):
            old_loss = self.loss
            self.loss = {'l1': 'epsilon_insensitive',
                         'l2': 'squared_epsilon_insensitive'
                         }.get(self.loss)
            warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
                          DeprecationWarning)
        # ---------------------------------------------------------------------

        # C is the inverse regularization strength and must be strictly
        # positive; C == 0 would be rejected by liblinear anyway, so fail
        # fast here with a clear message (was previously only `C < 0`).
        if self.C <= 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)

        X, y = check_X_y(X, y, accept_sparse='csr',
                         dtype=np.float64, order="C")
        penalty = 'l2'  # SVR only accepts l2 penalty

        self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
            X, y, self.C, self.fit_intercept, self.intercept_scaling,
            None, penalty, self.dual, self.verbose,
            self.max_iter, self.tol, self.random_state, loss=self.loss,
            epsilon=self.epsilon, sample_weight=sample_weight)
        # Regression has a single output, so expose a 1-D coefficient vector.
        self.coef_ = self.coef_.ravel()

        return self
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each
    other, see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to pre-compute the kernel matrix from data matrices; that matrix
        should be an array of shape ``(n_samples, n_samples)``.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr', default='ovr'
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).

        .. versionchanged:: 0.19
            decision_function_shape is 'ovr' by default.

        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.

        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator used when shuffling
        the data for probability estimates. If int, random_state is the
        seed used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random
        number generator is the RandomState instance used by `np.random`.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in the
        SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
        decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
        max_iter=-1, probability=False, random_state=None, shrinking=True,
        tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, decision_function_shape='ovr',
                 random_state=None):
        # SVC is the C-parameterized SVM formulation: ``impl`` is fixed to
        # 'c_svc' and ``nu`` (used only by the nu-parameterized variants)
        # is passed as 0. Everything else is forwarded to BaseSVC untouched.
        libsvm_params = dict(
            impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
        super(SVC, self).__init__(**libsvm_params)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.

    Similar to SVC but uses a parameter to control the number of support
    vectors.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_classification>`.

    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'balanced'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The "balanced" mode uses the values of y to automatically
        adjust weights inversely proportional to class frequencies as
        ``n_samples / (n_classes * np.bincount(y))``

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    decision_function_shape : 'ovo', 'ovr', default='ovr'
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).

        .. versionchanged:: 0.19
            decision_function_shape is 'ovr' by default.

        .. versionadded:: 0.17
           *decision_function_shape='ovr'* is recommended.

        .. versionchanged:: 0.17
           Deprecated *decision_function_shape='ovo' and None*.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator used when shuffling
        the data for probability estimates. If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random
        number generator is the RandomState instance used by `np.random`.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [n_SV, n_features]
        Support vectors.

    n_support_ : array-like, dtype=int32, shape = [n_class]
        Number of support vectors for each class.

    dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function.
        For multiclass, coefficient for all 1-vs-1 classifiers.
        The layout of the coefficients in the multiclass case is somewhat
        non-trivial. See the section about multi-class classification in
        the SVM section of the User Guide for details.

    coef_ : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, class_weight=None, coef0=0.0,
          decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
          max_iter=-1, nu=0.5, probability=False, random_state=None,
          shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.

    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """

    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 shrinking=True, probability=False, tol=1e-3, cache_size=200,
                 class_weight=None, verbose=False, max_iter=-1,
                 decision_function_shape='ovr', random_state=None):
        # Delegate everything to the shared libsvm wrapper.
        # ``impl='nu_svc'`` selects nu-parameterized classification; ``C``
        # is not used by nu-SVC, so a fixed placeholder of 0. is passed.
        super(NuSVC, self).__init__(
            impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            decision_function_shape=decision_function_shape,
            random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """Epsilon-Support Vector Regression.

    The free parameters in the model are C and epsilon.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    epsilon : float, optional (default=0.1)
        Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    sample_weight : array-like, shape = [n_samples]
        Individual weights for each sample

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
        kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.

    LinearSVR
        Scalable Linear Support Vector Machine for regression
        implemented using liblinear.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
                 cache_size=200, verbose=False, max_iter=-1):
        # Delegate everything to the shared libsvm wrapper.  'epsilon_svr'
        # selects epsilon-parameterized regression; ``nu`` is unused here,
        # and probability estimates / class weights do not apply to
        # regression, so fixed placeholders are passed for them.
        super(SVR, self).__init__(
            'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.

    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of epsilon-SVR.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_regression>`.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vector in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    intercept_ : array, shape = [1]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
          kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
          verbose=False)

    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.

    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """

    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
                 cache_size=200, verbose=False, max_iter=-1):
        # Delegate everything to the shared libsvm wrapper.  'nu_svr'
        # selects nu-parameterized regression; ``nu`` replaces ``epsilon``
        # in this formulation, so a fixed placeholder of 0. is passed for
        # the unused epsilon slot.
        super(NuSVR, self).__init__(
            'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=False, cache_size=cache_size, class_weight=None,
            verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
    """Unsupervised Outlier Detection.

    Estimate the support of a high-dimensional distribution.

    The implementation is based on libsvm.

    Read more in the :ref:`User Guide <svm_outlier_detection>`.

    Parameters
    ----------
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1]. By default 0.5 will be taken.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default='auto')
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 'auto' then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    tol : float, optional
        Tolerance for stopping criterion.

    shrinking : boolean, optional
        Whether to use the shrinking heuristic.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int, RandomState instance or None, optional (default=None)
        Ignored.

        .. deprecated:: 0.20
           ``random_state`` has been deprecated in 0.20 and will be removed in
           0.22.

    Attributes
    ----------
    support_ : array-like, shape = [n_SV]
        Indices of support vectors.

    support_vectors_ : array-like, shape = [nSV, n_features]
        Support vectors.

    dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vectors in the decision function.

    coef_ : array, shape = [1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`

    intercept_ : array, shape = [1,]
        Constant in the decision function.
    """

    def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # The one-class formulation has no C, no epsilon, no probability
        # estimates and no class weights, so fixed placeholders are passed
        # for those slots of the shared libsvm wrapper.
        super(OneClassSVM, self).__init__(
            impl='one_class', kernel=kernel, degree=degree, gamma=gamma,
            coef0=coef0, tol=tol, C=0., nu=nu, epsilon=0.,
            shrinking=shrinking, probability=False, cache_size=cache_size,
            class_weight=None, verbose=verbose, max_iter=max_iter,
            random_state=random_state)

    def fit(self, X, y=None, sample_weight=None, **params):
        """Detect the soft boundary of the set of samples X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.

        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Returns self.

        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        if self.random_state is not None:
            warnings.warn("The random_state parameter is deprecated and will"
                          " be removed in version 0.22.", DeprecationWarning)
        # The problem is unsupervised, so every sample gets the same
        # constant dummy target for the underlying libsvm fit.
        dummy_targets = np.ones(_num_samples(X))
        super(OneClassSVM, self).fit(X, dummy_targets,
                                     sample_weight=sample_weight, **params)
        return self

    def decision_function(self, X):
        """Signed distance to the separating hyperplane.

        Signed distance is positive for an inlier and negative for an
        outlier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X : array-like, shape (n_samples,)
            Returns the decision function of the samples.
        """
        return self._decision_function(X)

    def predict(self, X):
        """Perform classification on samples in X.

        For a one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train]

        Returns
        -------
        y_pred : array, shape (n_samples,)
            Class labels for samples in X.
        """
        raw_labels = super(OneClassSVM, self).predict(X)
        return np.asarray(raw_labels, dtype=np.intp)
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/tree/tree.py | 1 | 44306 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================

# Dtypes exposed by the Cython tree module: DTYPE for the input matrix X
# and DOUBLE for targets / sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Mapping from criterion name (fit-time string parameter) to the Cython
# criterion class used to measure split quality.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
                "mae": _criterion.MAE}

# Mapping from splitter name to the Cython splitter class, for dense and
# sparse input respectively.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False,
increasing=None,
decreasing=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.increasing = increasing
self.decreasing = decreasing
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
def _encode_monotonic(increasing, decreasing):
if increasing is None: increasing = []
if decreasing is None: decreasing = []
def is_int_in_range(feature):
return isinstance(feature, int) and 0 <= feature < self.n_features_
def is_valid(features):
return (isinstance(features, list) and
all(is_int_in_range(feature) for feature in features))
if not is_valid(increasing):
raise ValueError("increasing should be a list of ints in the range [0,n_features].")
if not is_valid(decreasing):
raise ValueError("decreasing should be a list of ints in the range [0,n_features].")
if increasing and decreasing:
intersection = set(increasing) & set(decreasing)
if intersection:
raise ValueError("The following features cannot be both increasing and decreasing: " + str(list(intersection)))
monotonic = np.zeros(self.n_features_, dtype=np.int32)
if increasing:
for feature in increasing:
monotonic[feature] = 1
if decreasing:
for feature in decreasing:
monotonic[feature] = -1
return monotonic
monotonic = _encode_monotonic(self.increasing, self.decreasing)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort,
monotonic)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]

        Raises
        ------
        NotFittedError
            If the estimator has not been fitted yet.
        """
        # ``tree_`` is only populated by ``fit``; accessing importances on an
        # unfitted estimator is a user error, reported explicitly.
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. versionadded:: 0.18

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    increasing : list of ints, optional (default=None)
        Indices of features to have a monotonically increasing effect.

    decreasing : list of ints, optional (default=None)
        Indices of features to have a monotonically decreasing effect.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 class_weight=None,
                 presort=False,
                 increasing=None,
                 decreasing=None):
        # All behavior lives in BaseDecisionTree; this subclass only fixes the
        # classification defaults and exposes probability prediction.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            min_impurity_split=min_impurity_split,
            presort=presort,
            increasing=increasing,
            decreasing=decreasing)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            # Row-normalize the per-class values; forcing zero sums to 1
            # avoids division by zero and leaves all-zero rows as zeros.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # Multi-output: normalize each output's class block independently.
            all_proba = []
            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Take the log of each output's probability array in place.
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. Supported criteria
        are "mse" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. If the impurity
        of a node is below the threshold, the node is a leaf.

        .. versionadded:: 0.18

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    increasing : list of ints, optional (default=None)
        Indices of features to have a monotonically increasing effect.

    decreasing : list of ints, optional (default=None)
        Indices of features to have a monotonically decreasing effect.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 presort=False,
                 increasing=None,
                 decreasing=None):
        # Pure pass-through to BaseDecisionTree with regression defaults;
        # note there is no ``class_weight`` for regression.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state,
            min_impurity_split=min_impurity_split,
            presort=presort,
            increasing=increasing,
            decreasing=decreasing)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 class_weight=None,
                 increasing=None,
                 decreasing=None):
        # Identical to DecisionTreeClassifier except for the extra-trees
        # defaults: splitter="random" and max_features="auto".
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            min_impurity_split=min_impurity_split,
            random_state=random_state,
            increasing=increasing,
            decreasing=decreasing)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------

    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 min_impurity_split=1e-7,
                 max_leaf_nodes=None,
                 increasing=None,
                 decreasing=None):
        # Extra-trees regression defaults: splitter="random" and
        # max_features="auto"; otherwise a pass-through to the parent.
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_split=min_impurity_split,
            random_state=random_state,
            increasing=increasing,
            decreasing=decreasing)
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/tests/test_series.py | 9 | 118972 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from collections import defaultdict
from distutils.version import LooseVersion
import inspect
from itertools import product
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pyspark.ml.linalg import SparseVector
from pyspark import pandas as ps
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class SeriesTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
    def test_series_ops(self):
        # Mixed arithmetic between a Series and itself / its Index must match
        # pandas, including index alignment of the operands.
        pser = self.pser
        psser = self.psser
        self.assert_eq(psser + 1 + 10 * psser, pser + 1 + 10 * pser)
        self.assert_eq(psser + 1 + 10 * psser.index, pser + 1 + 10 * pser.index)
        self.assert_eq(psser.index + 1 + 10 * psser, pser.index + 1 + 10 * pser)
    def test_series_tuple_name(self):
        # A tuple is a valid (hashable) Series name and must survive both the
        # initial conversion and a later in-place rename.
        pser = self.pser
        pser.name = ("x", "a")
        psser = ps.from_pandas(pser)
        self.assert_eq(psser, pser)
        self.assert_eq(psser.name, pser.name)
        pser.name = ("y", "z")
        psser.name = ("y", "z")
        self.assert_eq(psser, pser)
        self.assert_eq(psser.name, pser.name)
    def test_repr_cache_invalidation(self):
        # If there is any cache, inplace operations should invalidate it:
        # __repr__ after rename(inplace=True) must show the new name.
        s = ps.range(10)["id"]
        s.__repr__()
        s.rename("a", inplace=True)
        self.assertEqual(s.__repr__(), s.rename("a").__repr__())
    def _check_extension(self, psser, pser):
        # Helper for comparing Series with pandas extension dtypes.
        # NOTE(review): presumably the inexact comparison works around a
        # pandas issue present in [1.1, 1.2.2) — version window taken from
        # the original test, not independently verified.
        if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
            self.assert_eq(psser, pser, check_exact=False)
            self.assertTrue(isinstance(psser.dtype, extension_dtypes))
        else:
            self.assert_eq(psser, pser)
    def test_empty_series(self):
        # Empty Series of numeric and string dtype must round-trip through
        # from_pandas both with Arrow enabled (default) and disabled.
        pser_a = pd.Series([], dtype="i1")
        pser_b = pd.Series([], dtype="str")
        self.assert_eq(ps.from_pandas(pser_a), pser_a)
        psser_b = ps.from_pandas(pser_b)
        self.assert_eq(psser_b, pser_b)
        with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
            self.assert_eq(ps.from_pandas(pser_a), pser_a)
            self.assert_eq(ps.from_pandas(pser_b), pser_b)
    def test_all_null_series(self):
        # Series containing only nulls must round-trip for both float and
        # string dtypes, with and without Arrow conversion.
        pser_a = pd.Series([None, None, None], dtype="float64")
        pser_b = pd.Series([None, None, None], dtype="str")
        self.assert_eq(ps.from_pandas(pser_a), pser_a)
        psser_b = ps.from_pandas(pser_b)
        self.assert_eq(psser_b, pser_b)
        with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
            self.assert_eq(ps.from_pandas(pser_a), pser_a)
            self.assert_eq(ps.from_pandas(pser_b), pser_b)
def test_head(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser.head(3), pser.head(3))
self.assert_eq(psser.head(0), pser.head(0))
self.assert_eq(psser.head(-3), pser.head(-3))
self.assert_eq(psser.head(-10), pser.head(-10))
    def test_last(self):
        # last() on the default (non-datetime) index raises TypeError; on a
        # DatetimeIndex it must match pandas.
        with self.assertRaises(TypeError):
            self.psser.last("1D")
        index = pd.date_range("2018-04-09", periods=4, freq="2D")
        pser = pd.Series([1, 2, 3, 4], index=index)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.last("1D"), pser.last("1D"))
    def test_first(self):
        # first() on the default (non-datetime) index raises TypeError; on a
        # DatetimeIndex it must match pandas.
        with self.assertRaises(TypeError):
            self.psser.first("1D")
        index = pd.date_range("2018-04-09", periods=4, freq="2D")
        pser = pd.Series([1, 2, 3, 4], index=index)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.first("1D"), pser.first("1D"))
    def test_rename(self):
        # Assigning to ``name`` (including None) must propagate to both the
        # Series and its Index; non-hashable names must raise TypeError.
        pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
        psser = ps.from_pandas(pser)
        pser.name = "renamed"
        psser.name = "renamed"
        self.assertEqual(psser.name, "renamed")
        self.assert_eq(psser, pser)
        pser.name = None
        psser.name = None
        self.assertEqual(psser.name, None)
        self.assert_eq(psser, pser)
        pidx = pser.index
        psidx = psser.index
        pidx.name = "renamed"
        psidx.name = "renamed"
        self.assertEqual(psidx.name, "renamed")
        self.assert_eq(psidx, pidx)
        expected_error_message = "Series.name must be a hashable type"
        with self.assertRaisesRegex(TypeError, expected_error_message):
            psser.name = ["renamed"]
        with self.assertRaisesRegex(TypeError, expected_error_message):
            psser.name = ["0", "1"]
        with self.assertRaisesRegex(TypeError, expected_error_message):
            ps.Series([1, 2, 3], name=["0", "1"])
    def test_rename_method(self):
        # rename() must return a renamed copy without mutating the original,
        # and support inplace=True; non-hashable names raise TypeError.
        # Series name
        pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.rename("y"), pser.rename("y"))
        self.assertEqual(psser.name, "x")  # no mutation
        self.assert_eq(psser.rename(), pser.rename())
        self.assert_eq((psser.rename("y") + 1).head(), (pser.rename("y") + 1).head())
        psser.rename("z", inplace=True)
        pser.rename("z", inplace=True)
        self.assertEqual(psser.name, "z")
        self.assert_eq(psser, pser)
        expected_error_message = "Series.name must be a hashable type"
        with self.assertRaisesRegex(TypeError, expected_error_message):
            psser.rename(["0", "1"])
        # Index renaming via a mapper is not implemented yet; the cases below
        # are kept as a reminder of the intended coverage.
        # Series index
        # pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
        # psser = ps.from_pandas(s)
        # TODO: index
        # res = psser.rename(lambda x: x ** 2)
        # self.assert_eq(res, pser.rename(lambda x: x ** 2))
        # res = psser.rename(pser)
        # self.assert_eq(res, pser.rename(pser))
        # res = psser.rename(psser)
        # self.assert_eq(res, pser.rename(pser))
        # res = psser.rename(lambda x: x**2, inplace=True)
        # self.assertis(res, psser)
        # s.rename(lambda x: x**2, inplace=True)
        # self.assert_eq(psser, pser)
    def test_rename_axis(self):
        # rename_axis() must match pandas for single and MultiIndex Series,
        # for positional, dict, and callable mappers, and for inplace=True.
        # The pre-0.24 branches emulate pandas' later behavior by mutating
        # index names directly.
        index = pd.Index(["A", "B", "C"], name="index")
        pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.rename_axis("index2").sort_index(),
            psser.rename_axis("index2").sort_index(),
        )
        self.assert_eq(
            (pser + 1).rename_axis("index2").sort_index(),
            (psser + 1).rename_axis("index2").sort_index(),
        )
        pser2 = pser.copy()
        psser2 = psser.copy()
        pser2.rename_axis("index2", inplace=True)
        psser2.rename_axis("index2", inplace=True)
        self.assert_eq(pser2.sort_index(), psser2.sort_index())
        self.assertRaises(ValueError, lambda: psser.rename_axis(["index2", "index3"]))
        self.assertRaises(TypeError, lambda: psser.rename_axis(mapper=["index2"], index=["index3"]))
        # index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
        if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
            self.assert_eq(
                pser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
                psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
            )
            self.assert_eq(
                pser.rename_axis(index=str.upper).sort_index(),
                psser.rename_axis(index=str.upper).sort_index(),
            )
        else:
            expected = psser
            expected.index.name = "index2"
            result = psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index()
            self.assert_eq(expected, result)
            expected = psser
            expected.index.name = "INDEX"
            result = psser.rename_axis(index=str.upper).sort_index()
            self.assert_eq(expected, result)
        index = pd.MultiIndex.from_tuples(
            [("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
        )
        pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.rename_axis(["index3", "index4"]).sort_index(),
            psser.rename_axis(["index3", "index4"]).sort_index(),
        )
        self.assertRaises(ValueError, lambda: psser.rename_axis(["index3", "index4", "index5"]))
        # index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
        if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
            self.assert_eq(
                pser.rename_axis(
                    index={"index1": "index3", "index2": "index4", "missing": "index5"}
                ).sort_index(),
                psser.rename_axis(
                    index={"index1": "index3", "index2": "index4", "missing": "index5"}
                ).sort_index(),
            )
            self.assert_eq(
                pser.rename_axis(index=str.upper).sort_index(),
                psser.rename_axis(index=str.upper).sort_index(),
            )
        else:
            expected = psser
            expected.index.names = ["index3", "index4"]
            result = psser.rename_axis(
                index={"index1": "index3", "index2": "index4", "missing": "index5"}
            ).sort_index()
            self.assert_eq(expected, result)
            expected.index.names = ["INDEX1", "INDEX2"]
            result = psser.rename_axis(index=str.upper).sort_index()
            self.assert_eq(expected, result)
    def test_or(self):
        # Element-wise | with another Series, scalars, and None must match
        # pandas' three-valued logic with NaN operands.
        pdf = pd.DataFrame(
            {
                "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
                "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
            }
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
        self.assert_eq(psdf["left"] | True, pdf["left"] | True)
        self.assert_eq(psdf["left"] | False, pdf["left"] | False)
        self.assert_eq(psdf["left"] | None, pdf["left"] | None)
        self.assert_eq(True | psdf["right"], True | pdf["right"])
        self.assert_eq(False | psdf["right"], False | pdf["right"])
        self.assert_eq(None | psdf["right"], None | pdf["right"])
    @unittest.skipIf(
        not extension_object_dtypes_available, "pandas extension object dtypes are not available"
    )
    def test_or_extenstion_dtypes(self):
        # Same as test_or but on the nullable "boolean" extension dtype,
        # using pd.NA instead of None.
        # NOTE(review): "extenstion" is a typo in the method name; renaming
        # would change test discovery, so it is left as-is.
        pdf = pd.DataFrame(
            {
                "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
                "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
            }
        ).astype("boolean")
        psdf = ps.from_pandas(pdf)
        self._check_extension(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
        self._check_extension(psdf["left"] | True, pdf["left"] | True)
        self._check_extension(psdf["left"] | False, pdf["left"] | False)
        self._check_extension(psdf["left"] | pd.NA, pdf["left"] | pd.NA)
        self._check_extension(True | psdf["right"], True | pdf["right"])
        self._check_extension(False | psdf["right"], False | pdf["right"])
        self._check_extension(pd.NA | psdf["right"], pd.NA | pdf["right"])
    def test_and(self):
        # Element-wise & with another Series, scalars, and None must match
        # pandas' three-valued logic with NaN operands.
        pdf = pd.DataFrame(
            {
                "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
                "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
            }
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
        self.assert_eq(psdf["left"] & True, pdf["left"] & True)
        self.assert_eq(psdf["left"] & False, pdf["left"] & False)
        self.assert_eq(psdf["left"] & None, pdf["left"] & None)
        self.assert_eq(True & psdf["right"], True & pdf["right"])
        self.assert_eq(False & psdf["right"], False & pdf["right"])
        self.assert_eq(None & psdf["right"], None & pdf["right"])
    @unittest.skipIf(
        not extension_object_dtypes_available, "pandas extension object dtypes are not available"
    )
    def test_and_extenstion_dtypes(self):
        # Same as test_and but on the nullable "boolean" extension dtype,
        # using pd.NA instead of None.
        # NOTE(review): "extenstion" is a typo in the method name; renaming
        # would change test discovery, so it is left as-is.
        pdf = pd.DataFrame(
            {
                "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
                "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
            }
        ).astype("boolean")
        psdf = ps.from_pandas(pdf)
        self._check_extension(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
        self._check_extension(psdf["left"] & True, pdf["left"] & True)
        self._check_extension(psdf["left"] & False, pdf["left"] & False)
        self._check_extension(psdf["left"] & pd.NA, pdf["left"] & pd.NA)
        self._check_extension(True & psdf["right"], True & pdf["right"])
        self._check_extension(False & psdf["right"], False & pdf["right"])
        self._check_extension(pd.NA & psdf["right"], pd.NA & pdf["right"])
def test_to_numpy(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.to_numpy(), pser.values)
    def test_isin(self):
        # isin() must accept list, ndarray, and set inputs, preserve numpy
        # integer-type equality, and reject non-list-like arguments.
        pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal")
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.isin(["cow", "lama"]), pser.isin(["cow", "lama"]))
        self.assert_eq(psser.isin(np.array(["cow", "lama"])), pser.isin(np.array(["cow", "lama"])))
        self.assert_eq(psser.isin({"cow"}), pser.isin({"cow"}))
        pser = pd.Series([np.int64(1), np.int32(1), 1])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.isin([np.int64(1)]), pser.isin([np.int64(1)]))
        msg = "only list-like objects are allowed to be passed to isin()"
        with self.assertRaisesRegex(TypeError, msg):
            psser.isin(1)
    def test_drop_duplicates(self):
        # drop_duplicates() must match pandas for keep="first" (default),
        # keep="last", and keep=False with inplace=True; the inplace call on
        # a column must also propagate to the parent DataFrame.
        pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]})
        psdf = ps.from_pandas(pdf)
        pser = pdf.animal
        psser = psdf.animal
        self.assert_eq(psser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index())
        self.assert_eq(
            psser.drop_duplicates(keep="last").sort_index(),
            pser.drop_duplicates(keep="last").sort_index(),
        )
        # inplace
        psser.drop_duplicates(keep=False, inplace=True)
        pser.drop_duplicates(keep=False, inplace=True)
        self.assert_eq(psser.sort_index(), pser.sort_index())
        self.assert_eq(psdf, pdf)
def test_reindex(self):
    """Series.reindex with existing/missing labels and fill_value; bad index type raises."""
    labels = ["A", "B", "C", "D", "E"]
    expected = pd.Series([1.0, 2.0, 3.0, 4.0, None], index=labels, name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq(expected, actual)
    # Subset of labels, then a mix of existing and missing labels.
    for new_labels in (["A", "B"], ["A", "B", "2", "3"]):
        self.assert_eq(
            expected.reindex(new_labels).sort_index(),
            actual.reindex(new_labels).sort_index(),
        )
    # Missing labels filled with an explicit value.
    self.assert_eq(
        expected.reindex(["A", "E", "2"], fill_value=0).sort_index(),
        actual.reindex(["A", "E", "2"], fill_value=0).sort_index(),
    )
    self.assertRaises(TypeError, lambda: actual.reindex(index=123))
def test_reindex_like(self):
    """Series.reindex_like against Series/DataFrame targets with single and MultiIndex."""
    data = [1.0, 2.0, None]
    index = pd.Index(["A", "B", "C"], name="index1")
    pser = pd.Series(data=data, index=index, name="name1")
    psser = ps.from_pandas(pser)
    # Reindexing single Index on single Index
    data2 = [3.0, None, 4.0]
    index2 = pd.Index(["A", "C", "D"], name="index2")
    pser2 = pd.Series(data=data2, index=index2, name="name2")
    psser2 = ps.from_pandas(pser2)
    self.assert_eq(
        pser.reindex_like(pser2).sort_index(),
        psser.reindex_like(psser2).sort_index(),
    )
    # The result of an arithmetic expression should reindex the same way.
    self.assert_eq(
        (pser + 1).reindex_like(pser2).sort_index(),
        (psser + 1).reindex_like(psser2).sort_index(),
    )
    # Reindexing MultiIndex on single Index
    index2 = pd.MultiIndex.from_tuples(
        [("A", "G"), ("C", "D"), ("I", "J")], names=["index3", "index4"]
    )
    pser2 = pd.Series(data=data2, index=index2, name="name2")
    psser2 = ps.from_pandas(pser2)
    self.assert_eq(
        pser.reindex_like(pser2).sort_index(),
        psser.reindex_like(psser2).sort_index(),
    )
    # A bare Index (not Series/DataFrame) target is rejected.
    self.assertRaises(TypeError, lambda: psser.reindex_like(index2))
    # Reindexing a MultiIndexed series like a single-indexed one raises.
    self.assertRaises(AssertionError, lambda: psser2.reindex_like(psser))
    # Reindexing MultiIndex on MultiIndex
    index = pd.MultiIndex.from_tuples(
        [("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
    )
    pser = pd.Series(data=data, index=index, name="name1")
    psser = ps.from_pandas(pser)
    self.assert_eq(
        pser.reindex_like(pser2).sort_index(),
        psser.reindex_like(psser2).sort_index(),
    )
    # Reindexing with DataFrame
    index2 = pd.MultiIndex.from_tuples(
        [("A", "B"), ("C", "D"), ("E", "F")], names=["name3", "name4"]
    )
    pdf = pd.DataFrame(data=data, index=index2)
    psdf = ps.from_pandas(pdf)
    self.assert_eq(
        pser.reindex_like(pdf).sort_index(),
        psser.reindex_like(psdf).sort_index(),
    )
def test_fillna(self):
    """Series.fillna parity: scalar fill, inplace on a frame column, ffill/bfill."""
    pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
    psdf = ps.from_pandas(pdf)
    pser = pdf.x
    psser = psdf.x
    self.assert_eq(psser.fillna(0), pser.fillna(0))
    # Filling with NaN first is a no-op; the second fill does the work.
    self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
    # Inplace fill on the column view must propagate to the parent frame.
    psser.fillna(0, inplace=True)
    pser.fillna(0, inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psdf, pdf)
    # test considering series does not have NA/NaN values
    psser.fillna(0, inplace=True)
    pser.fillna(0, inplace=True)
    self.assert_eq(psser, pser)
    # Renamed column view: inplace fill still works when the series name
    # no longer matches the backing frame column.
    psser = psdf.x.rename("y")
    pser = pdf.x.rename("y")
    psser.fillna(0, inplace=True)
    pser.fillna(0, inplace=True)
    self.assert_eq(psser.head(), pser.head())
    pser = pd.Series([1, 2, 3, 4, 5, 6], name="x")
    psser = ps.from_pandas(pser)
    # Introduce a NaN via label-based assignment before testing fill methods.
    pser.loc[3] = np.nan
    psser.loc[3] = np.nan
    self.assert_eq(psser.fillna(0), pser.fillna(0))
    self.assert_eq(psser.fillna(method="ffill"), pser.fillna(method="ffill"))
    self.assert_eq(psser.fillna(method="bfill"), pser.fillna(method="bfill"))
    # inplace fillna on non-nullable column
    pdf = pd.DataFrame({"a": [1, 2, None], "b": [1, 2, 3]})
    psdf = ps.from_pandas(pdf)
    pser = pdf.b
    psser = psdf.b
    self.assert_eq(psser.fillna(0), pser.fillna(0))
    self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
    psser.fillna(0, inplace=True)
    pser.fillna(0, inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psdf, pdf)
def test_dropna(self):
    """Series.dropna parity, including inplace on a DataFrame column."""
    pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]})
    psdf = ps.from_pandas(pdf)
    expected, actual = pdf.x, psdf.x
    self.assert_eq(actual.dropna(), expected.dropna())
    # Dropping inplace on the column view should keep the parent frames in sync.
    expected.dropna(inplace=True)
    actual.dropna(inplace=True)
    self.assert_eq(actual, expected)
    self.assert_eq(psdf, pdf)
def test_nunique(self):
    """Series.nunique drops NaN by default; dropna=False counts it; approx variants."""
    expected = pd.Series([1, 2, 1, np.nan])
    actual = ps.from_pandas(expected)
    # NaN is excluded by default.
    self.assertEqual(actual.nunique(), 2)
    self.assert_eq(actual.nunique(), expected.nunique())
    # NaN is counted as a distinct value when dropna=False.
    self.assertEqual(actual.nunique(dropna=False), 3)
    self.assert_eq(actual.nunique(dropna=False), expected.nunique(dropna=False))
    # Approximate counts: default relative error over-counts; tightened rsd is exact here.
    self.assertEqual(ps.Series(range(100)).nunique(approx=True), 103)
    self.assertEqual(ps.Series(range(100)).nunique(approx=True, rsd=0.01), 100)
def test_value_counts(self):
    """Series/Index/MultiIndex.value_counts parity with pandas.

    Covers the normalize/ascending x dropna grid for a plain Series, a Series
    taken from a DataFrame, NaN-containing indexes, and MultiIndexes (with
    partially and fully missing tuples). The repeated four-assert grid is
    factored into a local helper to remove copy-paste duplication.
    """

    def assert_value_counts_grid(psobj, pobj, almost=False):
        # One place for the normalize/ascending x dropna combinations.
        self.assert_eq(
            psobj.value_counts(normalize=True),
            pobj.value_counts(normalize=True),
            almost=almost,
        )
        self.assert_eq(
            psobj.value_counts(ascending=True),
            pobj.value_counts(ascending=True),
            almost=almost,
        )
        self.assert_eq(
            psobj.value_counts(normalize=True, dropna=False),
            pobj.value_counts(normalize=True, dropna=False),
            almost=almost,
        )
        self.assert_eq(
            psobj.value_counts(ascending=True, dropna=False),
            pobj.value_counts(ascending=True, dropna=False),
            almost=almost,
        )

    # this is also containing test for Index & MultiIndex
    pser = pd.Series(
        [1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
        index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
        name="x",
    )
    psser = ps.from_pandas(pser)
    exp = pser.value_counts()
    res = psser.value_counts()
    self.assertEqual(res.name, exp.name)
    self.assert_eq(res, exp)
    assert_value_counts_grid(psser, pser)
    assert_value_counts_grid(psser.index, pser.index)
    with self.assertRaisesRegex(
        NotImplementedError, "value_counts currently does not support bins"
    ):
        psser.value_counts(bins=3)
    # A series named "index" must still count correctly.
    pser.name = "index"
    psser.name = "index"
    self.assert_eq(psser.value_counts(), pser.value_counts())
    # Series from DataFrame
    pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]})
    psdf = ps.from_pandas(pdf)
    assert_value_counts_grid(psdf.a, pdf.a)
    assert_value_counts_grid(psser.index, pser.index)
    # Series with NaN index
    pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0])
    psser = ps.from_pandas(pser)
    assert_value_counts_grid(psser, pser)
    assert_value_counts_grid(psser.index, pser.index)
    # Series with MultiIndex
    pser.index = pd.MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
    )
    psser = ps.from_pandas(pser)
    assert_value_counts_grid(psser, pser)
    # FIXME: MultiIndex.value_counts returns wrong indices.
    assert_value_counts_grid(psser.index, pser.index, almost=True)
    # Series with MultiIndex some of index has NaN
    pser.index = pd.MultiIndex.from_tuples(
        [("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
    )
    psser = ps.from_pandas(pser)
    assert_value_counts_grid(psser, pser)
    # FIXME: MultiIndex.value_counts returns wrong indices.
    assert_value_counts_grid(psser.index, pser.index, almost=True)
    # Series with MultiIndex some of index is NaN.
    # This test only available for pandas >= 0.24.
    if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
        pser.index = pd.MultiIndex.from_tuples(
            [("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
        )
        psser = ps.from_pandas(pser)
        assert_value_counts_grid(psser, pser)
        # FIXME: MultiIndex.value_counts returns wrong indices.
        assert_value_counts_grid(psser.index, pser.index, almost=True)
def test_nsmallest(self):
    """Series.nsmallest with explicit n, default n, and after arithmetic."""
    values = [1, 2, 3, 4, np.nan, 6]
    expected = pd.Series(values, name="x")
    actual = ps.Series(values, name="x")
    self.assert_eq(actual.nsmallest(n=3), expected.nsmallest(n=3))
    self.assert_eq(actual.nsmallest(), expected.nsmallest())
    self.assert_eq((actual + 1).nsmallest(), (expected + 1).nsmallest())
def test_nlargest(self):
    """Series.nlargest with explicit n, default n, and after arithmetic."""
    values = [1, 2, 3, 4, np.nan, 6]
    expected = pd.Series(values, name="x")
    actual = ps.Series(values, name="x")
    self.assert_eq(actual.nlargest(n=3), expected.nlargest(n=3))
    self.assert_eq(actual.nlargest(), expected.nlargest())
    self.assert_eq((actual + 1).nlargest(), (expected + 1).nlargest())
def test_notnull(self):
    """Series.notnull parity, with NaN present and on the shared fixture pair."""
    expected = pd.Series([1, 2, 3, 4, np.nan, 6], name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.notnull(), expected.notnull())
    # Also validate against the test case's shared fixture series.
    self.assert_eq(self.psser.notnull(), self.pser.notnull())
def test_all(self):
    """Series.all parity across booleans, ints, None/NaN and empty input."""
    cases = [
        [True, True],
        [True, False],
        [0, 1],
        [1, 2, 3],
        [True, True, None],
        [True, False, None],
        [],
        [np.nan],
    ]
    for data in cases:
        expected = pd.Series(data, name="x")
        actual = ps.from_pandas(expected)
        self.assert_eq(actual.all(), expected.all())
    expected = pd.Series([1, 2, 3, 4], name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq((actual % 2 == 0).all(), (expected % 2 == 0).all())
    # Only axis=0 ("index") is supported.
    with self.assertRaisesRegex(
        NotImplementedError, 'axis should be either 0 or "index" currently.'
    ):
        actual.all(axis=1)
def test_any(self):
    """Series.any parity across booleans, ints, None/NaN and empty input."""
    cases = [
        [False, False],
        [True, False],
        [0, 1],
        [1, 2, 3],
        [True, True, None],
        [True, False, None],
        [],
        [np.nan],
    ]
    for data in cases:
        expected = pd.Series(data, name="x")
        actual = ps.from_pandas(expected)
        self.assert_eq(actual.any(), expected.any())
    expected = pd.Series([1, 2, 3, 4], name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq((actual % 2 == 0).any(), (expected % 2 == 0).any())
    # Only axis=0 ("index") is supported.
    with self.assertRaisesRegex(
        NotImplementedError, 'axis should be either 0 or "index" currently.'
    ):
        actual.any(axis=1)
def test_reset_index(self):
    """Series.reset_index: default, named, drop=True, and inplace variants."""
    pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx"))
    psdf = ps.from_pandas(pdf)
    pser = pdf.foo
    psser = psdf.foo
    self.assert_eq(psser.reset_index(), pser.reset_index())
    self.assert_eq(psser.reset_index(name="values"), pser.reset_index(name="values"))
    self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
    # inplace
    psser.reset_index(drop=True, inplace=True)
    pser.reset_index(drop=True, inplace=True)
    self.assert_eq(psser, pser)
    # Parent frames must be unaffected by the inplace reset on the column view.
    self.assert_eq(psdf, pdf)
def test_reset_index_with_default_index_types(self):
    """reset_index under each compute.default_index_type option."""
    expected = pd.Series([1, 2, 3], name="0", index=np.random.rand(3))
    actual = ps.from_pandas(expected)
    with ps.option_context("compute.default_index_type", "sequence"):
        self.assert_eq(actual.reset_index(), expected.reset_index())
    with ps.option_context("compute.default_index_type", "distributed-sequence"):
        # the order might be changed.
        self.assert_eq(actual.reset_index().sort_index(), expected.reset_index())
    with ps.option_context("compute.default_index_type", "distributed"):
        # the index is different.
        self.assert_eq(
            actual.reset_index().to_pandas().reset_index(drop=True), expected.reset_index()
        )
def test_index_to_series_reset_index(self):
    """Index.to_series().reset_index parity, including inplace, for various names."""

    def verify(psser, pser):
        self.assert_eq(psser.reset_index(), pser.reset_index())
        self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
        # Inplace reset must leave both sides equal afterwards.
        pser.reset_index(drop=True, inplace=True)
        psser.reset_index(drop=True, inplace=True)
        self.assert_eq(psser, pser)

    pdf = pd.DataFrame(
        {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
        index=np.random.rand(9),
    )
    psdf = ps.from_pandas(pdf)
    # Unnamed, str-named, and tuple-named index-to-series conversions.
    verify(psdf.index.to_series(), pdf.index.to_series())
    verify(psdf.index.to_series(name="a"), pdf.index.to_series(name="a"))
    verify(psdf.index.to_series(name=("x", "a")), pdf.index.to_series(name=("x", "a")))
def test_sort_values(self):
    """Series.sort_values parity, including na_position and inplace semantics."""
    pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]})
    psdf = ps.from_pandas(pdf)
    pser = pdf.x
    psser = psdf.x
    self.assert_eq(psser.sort_values(), pser.sort_values())
    self.assert_eq(psser.sort_values(ascending=False), pser.sort_values(ascending=False))
    self.assert_eq(
        psser.sort_values(na_position="first"), pser.sort_values(na_position="first")
    )
    self.assertRaises(ValueError, lambda: psser.sort_values(na_position="invalid"))
    # inplace
    # pandas raises an exception when the Series is derived from DataFrame
    psser.sort_values(inplace=True)
    self.assert_eq(psser, pser.sort_values())
    self.assert_eq(psdf, pdf)
    # On detached copies, both sides support inplace sorting.
    pser = pdf.x.copy()
    psser = psdf.x.copy()
    psser.sort_values(inplace=True)
    pser.sort_values(inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psdf, pdf)
def test_sort_index(self):
    """Series.sort_index parity: validation, ordering options, inplace, MultiIndex."""
    pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan])
    psdf = ps.from_pandas(pdf)
    pser = pdf.x
    psser = psdf.x
    # Assert invalid parameters
    self.assertRaises(NotImplementedError, lambda: psser.sort_index(axis=1))
    self.assertRaises(NotImplementedError, lambda: psser.sort_index(kind="mergesort"))
    self.assertRaises(ValueError, lambda: psser.sort_index(na_position="invalid"))
    # Assert default behavior without parameters
    self.assert_eq(psser.sort_index(), pser.sort_index())
    # Assert sorting descending
    self.assert_eq(psser.sort_index(ascending=False), pser.sort_index(ascending=False))
    # Assert sorting NA indices first
    self.assert_eq(psser.sort_index(na_position="first"), pser.sort_index(na_position="first"))
    # Assert sorting inplace
    # pandas sorts pdf.x by the index and update the column only
    # when the Series is derived from DataFrame.
    psser.sort_index(inplace=True)
    self.assert_eq(psser, pser.sort_index())
    self.assert_eq(psdf, pdf)
    # Detached copies support inplace sorting on both sides.
    pser = pdf.x.copy()
    psser = psdf.x.copy()
    psser.sort_index(inplace=True)
    pser.sort_index(inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psdf, pdf)
    # Assert multi-indices
    pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0")
    psser = ps.from_pandas(pser)
    self.assert_eq(psser.sort_index(), pser.sort_index())
    self.assert_eq(psser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]))
    self.assert_eq(psser.reset_index().sort_index(), pser.reset_index().sort_index())
def test_to_datetime(self):
    """ps.to_datetime mirrors pd.to_datetime with infer_datetime_format."""
    expected = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
    actual = ps.from_pandas(expected)
    self.assert_eq(
        pd.to_datetime(expected, infer_datetime_format=True),
        ps.to_datetime(actual, infer_datetime_format=True),
    )
def test_missing(self):
    """Every member of MissingPandasLikeSeries must raise PandasNotImplementedError
    with the matching "unsupported"/"deprecated" message for methods and properties."""
    psser = self.psser
    # Methods are classified by the name of the wrapper function that implements them.
    missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction)
    unsupported_functions = [
        name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
    ]
    for name in unsupported_functions:
        with self.assertRaisesRegex(
            PandasNotImplementedError,
            "method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
        ):
            getattr(psser, name)()
    deprecated_functions = [
        name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
    ]
    for name in deprecated_functions:
        with self.assertRaisesRegex(
            PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name)
        ):
            getattr(psser, name)()
    # Properties are classified via their fget's wrapper name; accessing the
    # attribute (no call) is what triggers the error.
    missing_properties = inspect.getmembers(
        MissingPandasLikeSeries, lambda o: isinstance(o, property)
    )
    unsupported_properties = [
        name
        for (name, type_) in missing_properties
        if type_.fget.__name__ == "unsupported_property"
    ]
    for name in unsupported_properties:
        with self.assertRaisesRegex(
            PandasNotImplementedError,
            "property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
        ):
            getattr(psser, name)
    deprecated_properties = [
        name
        for (name, type_) in missing_properties
        if type_.fget.__name__ == "deprecated_property"
    ]
    for name in deprecated_properties:
        with self.assertRaisesRegex(
            PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name)
        ):
            getattr(psser, name)
def test_clip(self):
    """Series.clip with scalar bounds; list-like bounds are rejected."""
    expected = pd.Series([0, 2, 4], index=np.random.rand(3))
    actual = ps.from_pandas(expected)
    # Assert list-like values are not accepted for 'lower' and 'upper'
    msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
    for kwargs in ({"lower": [1]}, {"upper": [1]}):
        with self.assertRaises(TypeError, msg=msg):
            actual.clip(**kwargs)
    self.assert_eq(actual.clip(), expected.clip())  # no bounds
    self.assert_eq(actual.clip(1), expected.clip(1))  # lower only
    self.assert_eq(actual.clip(upper=3), expected.clip(upper=3))  # upper only
    self.assert_eq(actual.clip(1, 3), expected.clip(1, 3))  # both bounds
    # Clipping a string series is a no-op.
    str_psser = ps.Series(["a", "b", "c"])
    self.assert_eq(str_psser.clip(1, 3), str_psser)
def test_compare(self):
    """Series.compare parity; pandas < 1.1 lacks Series.compare, so expectations
    are built by hand in that branch."""
    if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
        pser = pd.Series([1, 2])
        psser = ps.from_pandas(pser)
        res_psdf = psser.compare(psser)
        # Comparing a series with itself yields an empty frame with self/other columns.
        self.assertTrue(res_psdf.empty)
        self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
        self.assert_eq(
            pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
        )
        pser = pd.Series([1, 2], index=["x", "y"])
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
        )
    else:
        # No pandas baseline available; compare against hand-built expected frames.
        psser = ps.Series([1, 2])
        res_psdf = psser.compare(psser)
        self.assertTrue(res_psdf.empty)
        self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
        expected = ps.DataFrame([[1, 2], [2, 3]], columns=["self", "other"])
        self.assert_eq(expected, psser.compare(psser + 1).sort_index())
        psser = ps.Series([1, 2], index=["x", "y"])
        expected = ps.DataFrame([[1, 2], [2, 3]], index=["x", "y"], columns=["self", "other"])
        self.assert_eq(expected, psser.compare(psser + 1).sort_index())
def test_is_unique(self):
    """Series.is_unique semantics.

    The None-containing cases are asserted against literal False rather than
    pandas, because pandas 0.23's is_unique ignores None.
    """
    for data in ([1, 2, 2, None, None], [1, None, None]):
        psser = ps.from_pandas(pd.Series(data))
        self.assertEqual(False, psser.is_unique)
        self.assertEqual(False, (psser + 1).is_unique)
    # Without None, pandas and pandas-on-Spark can be compared directly.
    for data in ([1], [1, 1, 1]):
        pser = pd.Series(data)
        psser = ps.from_pandas(pser)
        self.assertEqual(pser.is_unique, psser.is_unique)
        self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique)
def test_to_list(self):
    """Series.tolist should match pandas' tolist on the shared fixture pair."""
    self.assert_eq(self.psser.tolist(), self.pser.tolist())
def test_append(self):
    """Series.append with overlapping/disjoint indices, ignore_index and verify_integrity."""
    expected1 = pd.Series([1, 2, 3], name="0")
    expected2 = pd.Series([4, 5, 6], name="0")
    expected3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0")
    actual1 = ps.from_pandas(expected1)
    actual2 = ps.from_pandas(expected2)
    actual3 = ps.from_pandas(expected3)
    self.assert_eq(actual1.append(actual2), expected1.append(expected2))
    self.assert_eq(actual1.append(actual3), expected1.append(expected3))
    self.assert_eq(
        actual1.append(actual2, ignore_index=True),
        expected1.append(expected2, ignore_index=True),
    )
    # Disjoint indices pass the integrity check...
    actual1.append(actual3, verify_integrity=True)
    # ...but overlapping ones must raise.
    with self.assertRaises(ValueError, msg="Indices have overlapping values"):
        actual1.append(actual2, verify_integrity=True)
def test_map(self):
    """Series.map with an empty dict, a defaultdict, and an annotated function.

    Fix: use ``np.nan`` directly instead of ``pd.np.nan`` — the ``pandas.np``
    alias was deprecated in pandas 1.0 and removed in pandas 2.0; ``np`` is
    already imported in this module.
    """
    pser = pd.Series(["cat", "dog", None, "rabbit"])
    psser = ps.from_pandas(pser)
    # Currently Koalas doesn't return NaN as pandas does.
    self.assert_eq(psser.map({}), pser.map({}).replace({np.nan: None}))

    d = defaultdict(lambda: "abc")
    self.assertTrue("abc" in repr(psser.map(d)))
    self.assert_eq(psser.map(d), pser.map(d))

    def tomorrow(date) -> datetime:
        # NOTE(review): the return annotation presumably lets pandas-on-Spark
        # infer the result type — confirm against the map implementation.
        return date + timedelta(days=1)

    pser = pd.Series([datetime(2019, 10, 24)])
    psser = ps.from_pandas(pser)
    self.assert_eq(psser.map(tomorrow), pser.map(tomorrow))
def test_add_prefix(self):
    """Series.add_prefix for flat and MultiIndex indexes."""
    expected = pd.Series([1, 2, 3, 4], name="0")
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.add_prefix("item_"), actual.add_prefix("item_"))
    # MultiIndexed series.
    expected = pd.Series(
        [1, 2, 3],
        name="0",
        index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
    )
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.add_prefix("item_"), actual.add_prefix("item_"))
def test_add_suffix(self):
    """Series.add_suffix for flat and MultiIndex indexes."""
    expected = pd.Series([1, 2, 3, 4], name="0")
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.add_suffix("_item"), actual.add_suffix("_item"))
    # MultiIndexed series.
    expected = pd.Series(
        [1, 2, 3],
        name="0",
        index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
    )
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.add_suffix("_item"), actual.add_suffix("_item"))
def test_cummin(self):
    """Series.cummin parity, including skipna=False and a descending index."""
    expected = pd.Series([1.0, None, 0.0, 4.0, 9.0])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cummin(), actual.cummin())
    self.assert_eq(expected.cummin(skipna=False), actual.cummin(skipna=False))
    self.assert_eq(expected.cummin().sum(), actual.cummin().sum())
    # with reversed index
    expected.index = [4, 3, 2, 1, 0]
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cummin(), actual.cummin())
    self.assert_eq(expected.cummin(skipna=False), actual.cummin(skipna=False))
def test_cummax(self):
    """Series.cummax parity, including skipna=False and a descending index."""
    expected = pd.Series([1.0, None, 0.0, 4.0, 9.0])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cummax(), actual.cummax())
    self.assert_eq(expected.cummax(skipna=False), actual.cummax(skipna=False))
    self.assert_eq(expected.cummax().sum(), actual.cummax().sum())
    # with reversed index
    expected.index = [4, 3, 2, 1, 0]
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cummax(), actual.cummax())
    self.assert_eq(expected.cummax(skipna=False), actual.cummax(skipna=False))
def test_cumsum(self):
    """Series.cumsum parity, incl. skipna=False, reversed index and bool input."""
    expected = pd.Series([1.0, None, 0.0, 4.0, 9.0])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cumsum(), actual.cumsum())
    self.assert_eq(expected.cumsum(skipna=False), actual.cumsum(skipna=False))
    self.assert_eq(expected.cumsum().sum(), actual.cumsum().sum())
    # with reversed index
    expected.index = [4, 3, 2, 1, 0]
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cumsum(), actual.cumsum())
    self.assert_eq(expected.cumsum(skipna=False), actual.cumsum(skipna=False))
    # bool: the pandas result is cast to int to match the pandas-on-Spark output.
    expected = pd.Series([True, True, False, True])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.cumsum().astype(int), actual.cumsum())
    self.assert_eq(expected.cumsum(skipna=False).astype(int), actual.cumsum(skipna=False))
def test_cumprod(self):
    """Series.cumprod parity across float/int/bool input, zeros, negatives,
    a reversed index, and skipna=False."""
    pser = pd.Series([1.0, None, 1.0, 4.0, 9.0])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
    self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
    # with integer type
    pser = pd.Series([1, 10, 1, 4, 9])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
    self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
    # with reversed index
    pser.index = [4, 3, 2, 1, 0]
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
    # including zero
    pser = pd.Series([1, 2, 0, 3])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
    # including negative values
    pser = pd.Series([1, -1, -2])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
    # bool
    pser = pd.Series([True, True, False, True])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.cumprod(), psser.cumprod())
    # skipna=False on bool: pandas result is cast to int to match the ps output.
    self.assert_eq(pser.cumprod(skipna=False).astype(int), psser.cumprod(skipna=False))
def test_median(self):
    """median rejects a non-integer accuracy argument."""
    psser = ps.Series([24.0, 21.0, 25.0, 33.0, 26.0])
    with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
        psser.median(accuracy="a")
def test_rank(self):
    """Series.rank for each tie-breaking method; invalid method raises."""
    expected = pd.Series([1, 2, 3, 1], name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.rank(), actual.rank().sort_index())
    self.assert_eq(expected.rank().sum(), actual.rank().sum())
    self.assert_eq(expected.rank(ascending=False), actual.rank(ascending=False).sort_index())
    for method in ("min", "max", "first", "dense"):
        self.assert_eq(expected.rank(method=method), actual.rank(method=method).sort_index())
    with self.assertRaisesRegex(
        ValueError, "method must be one of 'average', 'min', 'max', 'first', 'dense'"
    ):
        actual.rank(method="nothing")
def test_round(self):
    """Series.round parity; a non-integer decimals argument is rejected."""
    expected = pd.Series([0.028208, 0.038683, 0.877076], name="x")
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.round(2), actual.round(2))
    with self.assertRaisesRegex(TypeError, "decimals must be an integer"):
        actual.round(1.5)
def test_quantile(self):
    """quantile on an empty series plus validation of accuracy/q and non-numeric data."""
    expected = pd.Series([])
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.quantile(0.5), expected.quantile(0.5))
    self.assert_eq(actual.quantile([0.25, 0.5, 0.75]), expected.quantile([0.25, 0.5, 0.75]))
    numeric = ps.Series([24.0, 21.0, 25.0, 33.0, 26.0])
    with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
        numeric.quantile(accuracy="a")
    # q must be a float or an array of floats.
    for bad_q in ("a", ["a"]):
        with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
            numeric.quantile(q=bad_q)
    # Non-numeric data cannot be quantiled.
    strings = ps.Series(["a", "b", "c"])
    with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
        strings.quantile()
    with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
        strings.quantile([0.25, 0.5, 0.75])
def test_idxmax(self):
    """Series.idxmax for flat/MultiIndex, empty input, and NaN with skipna=False."""
    expected = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmax(), expected.idxmax())
    self.assertEqual(actual.idxmax(skipna=False), expected.idxmax(skipna=False))
    multi = pd.MultiIndex.from_arrays(
        [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
    )
    expected = pd.Series(data=[1, 2, 4, 5], index=multi)
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmax(), expected.idxmax())
    self.assertEqual(actual.idxmax(skipna=False), expected.idxmax(skipna=False))
    # An empty series has no argmax.
    with self.assertRaisesRegex(ValueError, "an empty sequence"):
        ps.Series([]).idxmax()
    expected = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmax(), expected.idxmax())
    # Compare reprs because NaN != NaN under plain equality.
    self.assertEqual(repr(actual.idxmax(skipna=False)), repr(expected.idxmax(skipna=False)))
def test_idxmin(self):
    """Series.idxmin for flat/MultiIndex, empty input, and NaN with skipna=False."""
    expected = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmin(), expected.idxmin())
    self.assertEqual(actual.idxmin(skipna=False), expected.idxmin(skipna=False))
    multi = pd.MultiIndex.from_arrays(
        [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
    )
    expected = pd.Series(data=[1, 2, 4, 5], index=multi)
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmin(), expected.idxmin())
    self.assertEqual(actual.idxmin(skipna=False), expected.idxmin(skipna=False))
    # An empty series has no argmin.
    with self.assertRaisesRegex(ValueError, "an empty sequence"):
        ps.Series([]).idxmin()
    expected = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
    actual = ps.Series(expected)
    self.assertEqual(actual.idxmin(), expected.idxmin())
    # Compare reprs because NaN != NaN under plain equality.
    self.assertEqual(repr(actual.idxmin(skipna=False)), repr(expected.idxmin(skipna=False)))
def test_shift(self):
    """Series.shift parity, incl. fill_value (pandas >= 0.24.2) and invalid periods."""
    expected = pd.Series([10, 20, 15, 30, 45], name="x")
    actual = ps.Series(expected)
    self.assert_eq(actual.shift(2), expected.shift(2))
    self.assert_eq(actual.shift().shift(-1), expected.shift().shift(-1))
    self.assert_eq(actual.shift().sum(), expected.shift().sum())
    if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
        # fill_value is unavailable on older pandas.
        self.assert_eq(actual.shift(periods=2), expected.shift(periods=2))
    else:
        self.assert_eq(
            actual.shift(periods=2, fill_value=0), expected.shift(periods=2, fill_value=0)
        )
    with self.assertRaisesRegex(TypeError, "periods should be an int; however"):
        actual.shift(periods=1.5)
def test_diff(self):
    """Series.diff parity for positive/negative periods and aggregation after diff."""
    expected = pd.Series([10, 20, 15, 30, 45], name="x")
    actual = ps.Series(expected)
    self.assert_eq(actual.diff(2), expected.diff(2))
    self.assert_eq(actual.diff().diff(-1), expected.diff().diff(-1))
    self.assert_eq(actual.diff().sum(), expected.diff().sum())
def _test_numeric_astype(self, pser):
    """Check Series.astype for a numeric input against pandas.

    Covers builtin types, numpy scalar types, dtype strings (including
    single-character codes), bool and string casts, and — when the installed
    pandas provides them — nullable integer/float and string extension dtypes.

    Parameters
    ----------
    pser : pd.Series
        Numeric pandas Series to round-trip through pandas-on-Spark. The
        string-extension fallback below assumes the values
        [10, 20, 15, 30, 45] and name "x" used by this helper's callers.
    """
    psser = ps.Series(pser)

    self.assert_eq(psser.astype(int), pser.astype(int))
    # NOTE: the aliases np.int / np.float / np.unicode_ were deprecated in
    # NumPy 1.20 and removed in NumPy 1.24 / 2.0; use the concrete scalar
    # types np.int_ / np.float64 / np.str_ instead.
    self.assert_eq(psser.astype(np.int_), pser.astype(np.int_))
    self.assert_eq(psser.astype(np.int8), pser.astype(np.int8))
    self.assert_eq(psser.astype(np.int16), pser.astype(np.int16))
    self.assert_eq(psser.astype(np.int32), pser.astype(np.int32))
    self.assert_eq(psser.astype(np.int64), pser.astype(np.int64))
    self.assert_eq(psser.astype(np.byte), pser.astype(np.byte))
    self.assert_eq(psser.astype("int"), pser.astype("int"))
    self.assert_eq(psser.astype("int8"), pser.astype("int8"))
    self.assert_eq(psser.astype("int16"), pser.astype("int16"))
    self.assert_eq(psser.astype("int32"), pser.astype("int32"))
    self.assert_eq(psser.astype("int64"), pser.astype("int64"))
    self.assert_eq(psser.astype("b"), pser.astype("b"))
    self.assert_eq(psser.astype("byte"), pser.astype("byte"))
    self.assert_eq(psser.astype("i"), pser.astype("i"))
    self.assert_eq(psser.astype("long"), pser.astype("long"))
    self.assert_eq(psser.astype("short"), pser.astype("short"))
    self.assert_eq(psser.astype(np.float64), pser.astype(np.float64))
    self.assert_eq(psser.astype(np.float32), pser.astype(np.float32))
    self.assert_eq(psser.astype("float"), pser.astype("float"))
    self.assert_eq(psser.astype("float32"), pser.astype("float32"))
    self.assert_eq(psser.astype("float64"), pser.astype("float64"))
    self.assert_eq(psser.astype("double"), pser.astype("double"))
    self.assert_eq(psser.astype("f"), pser.astype("f"))
    self.assert_eq(psser.astype(bool), pser.astype(bool))
    self.assert_eq(psser.astype("bool"), pser.astype("bool"))
    self.assert_eq(psser.astype("?"), pser.astype("?"))
    self.assert_eq(psser.astype(np.str_), pser.astype(np.str_))
    self.assert_eq(psser.astype("str"), pser.astype("str"))
    self.assert_eq(psser.astype("U"), pser.astype("U"))

    if extension_dtypes_available:
        from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype

        self._check_extension(psser.astype("Int8"), pser.astype("Int8"))
        self._check_extension(psser.astype("Int16"), pser.astype("Int16"))
        self._check_extension(psser.astype("Int32"), pser.astype("Int32"))
        self._check_extension(psser.astype("Int64"), pser.astype("Int64"))
        self._check_extension(psser.astype(Int8Dtype()), pser.astype(Int8Dtype()))
        self._check_extension(psser.astype(Int16Dtype()), pser.astype(Int16Dtype()))
        self._check_extension(psser.astype(Int32Dtype()), pser.astype(Int32Dtype()))
        self._check_extension(psser.astype(Int64Dtype()), pser.astype(Int64Dtype()))
    if extension_object_dtypes_available:
        from pandas import StringDtype

        if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
            self._check_extension(psser.astype("string"), pser.astype("string"))
            self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
        else:
            # pandas < 1.1 cannot cast these directly to "string"; compare
            # against a hand-built expectation instead.
            self._check_extension(
                psser.astype("string"),
                pd.Series(["10", "20", "15", "30", "45"], name="x", dtype="string"),
            )
            self._check_extension(
                psser.astype(StringDtype()),
                pd.Series(["10", "20", "15", "30", "45"], name="x", dtype=StringDtype()),
            )
    if extension_float_dtypes_available:
        from pandas import Float32Dtype, Float64Dtype

        self._check_extension(psser.astype("Float32"), pser.astype("Float32"))
        self._check_extension(psser.astype("Float64"), pser.astype("Float64"))
        self._check_extension(psser.astype(Float32Dtype()), pser.astype(Float32Dtype()))
        self._check_extension(psser.astype(Float64Dtype()), pser.astype(Float64Dtype()))
def test_astype(self):
    """End-to-end checks of Series.astype against pandas for numeric, string,
    boolean and datetime data, including pandas extension dtypes when the
    installed pandas version provides them."""
    # Numeric sources: plain int64, plus nullable Int64/Float64 when available.
    psers = [pd.Series([10, 20, 15, 30, 45], name="x")]
    if extension_dtypes_available:
        psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Int64"))
    if extension_float_dtypes_available:
        psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Float64"))
    for pser in psers:
        self._test_numeric_astype(pser)

    # Numeric data containing missing values.
    pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x")
    psser = ps.Series(pser)
    self.assert_eq(psser.astype(bool), pser.astype(bool))
    self.assert_eq(psser.astype(str), pser.astype(str))

    # String data, including whitespace-only entries and None.
    pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x")
    psser = ps.Series(pser)
    self.assert_eq(psser.astype(bool), pser.astype(bool))
    if LooseVersion("1.1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.1.4"):
        # a pandas bug: https://github.com/databricks/koalas/pull/1818#issuecomment-703961980
        self.assert_eq(psser.astype(str).tolist(), ["hi", "hi ", " ", " \t", "", "None"])
    else:
        self.assert_eq(psser.astype(str), pser.astype(str))
    self.assert_eq(psser.str.strip().astype(bool), pser.str.strip().astype(bool))
    if extension_object_dtypes_available:
        from pandas import StringDtype

        self._check_extension(psser.astype("string"), pser.astype("string"))
        self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))

    # Boolean data with a missing value.
    pser = pd.Series([True, False, None], name="x")
    psser = ps.Series(pser)
    self.assert_eq(psser.astype(bool), pser.astype(bool))
    self.assert_eq(psser.astype(str), pser.astype(str))
    if extension_object_dtypes_available:
        from pandas import BooleanDtype, StringDtype

        self._check_extension(psser.astype("boolean"), pser.astype("boolean"))
        self._check_extension(psser.astype(BooleanDtype()), pser.astype(BooleanDtype()))
        if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
            self._check_extension(psser.astype("string"), pser.astype("string"))
            self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
        else:
            # On pandas < 1.1 compare against a hand-built expected Series.
            self._check_extension(
                psser.astype("string"),
                pd.Series(["True", "False", None], name="x", dtype="string"),
            )
            self._check_extension(
                psser.astype(StringDtype()),
                pd.Series(["True", "False", None], name="x", dtype=StringDtype()),
            )

    # Datetime strings; "M" is the shorthand dtype code for datetime64.
    pser = pd.Series(["2020-10-27 00:00:01", None], name="x")
    psser = ps.Series(pser)
    self.assert_eq(psser.astype(np.datetime64), pser.astype(np.datetime64))
    self.assert_eq(psser.astype("datetime64[ns]"), pser.astype("datetime64[ns]"))
    self.assert_eq(psser.astype("M"), pser.astype("M"))
    self.assert_eq(psser.astype("M").astype(str), pser.astype("M").astype(str))
    # Comment out the below test cause because pandas returns `NaT` or `nan` randomly
    # self.assert_eq(
    #     psser.astype("M").dt.date.astype(str), pser.astype("M").dt.date.astype(str)
    # )
    if extension_object_dtypes_available:
        from pandas import StringDtype

        self._check_extension(
            psser.astype("M").astype("string"), pser.astype("M").astype("string")
        )
        self._check_extension(
            psser.astype("M").astype(StringDtype()), pser.astype("M").astype(StringDtype())
        )

    # Unknown dtype names are rejected.
    with self.assertRaisesRegex(TypeError, "not understood"):
        psser.astype("int63")
def test_aggregate(self):
    """Series.aggregate rejects dicts and lists containing non-string entries."""
    psser = ps.Series(pd.Series([10, 20, 15, 30, 45], name="x"))
    with self.assertRaisesRegex(TypeError, "func must be a string or list of strings"):
        psser.aggregate({"x": ["min", "max"]})
    err = (
        "If the given function is a list, it "
        "should only contains function names as strings."
    )
    with self.assertRaisesRegex(ValueError, err):
        psser.aggregate(["min", max])
def test_drop(self):
    """Series.drop should mirror pandas for labels, lists, tuples and
    MultiIndex levels, and raise the documented errors otherwise."""
    expected = pd.Series([10, 20, 15, 30, 45], name="x")
    actual = ps.Series(expected)
    self.assert_eq(actual.drop(1), expected.drop(1))
    self.assert_eq(actual.drop([1, 4]), expected.drop([1, 4]))

    with self.assertRaisesRegex(
        ValueError, "Need to specify at least one of 'labels' or 'index'"
    ):
        actual.drop()
    self.assertRaises(KeyError, lambda: actual.drop((0, 1)))

    # MultiIndex: drop by outer label, by level, by tuple, by list of
    # tuples, and by dict.
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    expected = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.drop("lama"), expected.drop("lama"))
    self.assert_eq(
        actual.drop(labels="weight", level=1), expected.drop(labels="weight", level=1)
    )
    self.assert_eq(actual.drop(("lama", "weight")), expected.drop(("lama", "weight")))
    self.assert_eq(
        actual.drop([("lama", "speed"), ("falcon", "weight")]),
        expected.drop([("lama", "speed"), ("falcon", "weight")]),
    )
    self.assert_eq(actual.drop({"lama": "speed"}), expected.drop({"lama": "speed"}))

    # Error conditions specific to the pandas-on-Spark implementation.
    with self.assertRaisesRegex(
        ValueError, "'level' should be less than the number of indexes"
    ):
        actual.drop(labels="weight", level=2)
    with self.assertRaisesRegex(
        ValueError,
        "If the given index is a list, it should only contains names as all "
        "tuples or all non tuples that contain index names",
    ):
        actual.drop(["lama", ["cow", "falcon"]])
    with self.assertRaisesRegex(ValueError, "Cannot specify both 'labels' and 'index'"):
        actual.drop("lama", index="cow")
    with self.assertRaisesRegex(
        KeyError, r"'Key length \(2\) exceeds index depth \(3\)'"
    ):
        actual.drop(("lama", "speed", "x"))
def test_pop(self):
    """Series.pop removes the item in place and the removal is reflected in
    the parent DataFrame."""
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
    psdf = ps.from_pandas(pdf)
    pser, psser = pdf.x, psdf.x

    # pop returns the removed value(s)...
    self.assert_eq(psser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
    # ...and both the Series and its parent DataFrame reflect the removal.
    self.assert_eq(psser, pser)
    self.assert_eq(psdf, pdf)

    with self.assertRaisesRegex(KeyError, r"'Key length \(3\) exceeds index depth \(2\)'"):
        psser.pop(("lama", "speed", "x"))
def test_replace(self):
    """Series.replace should mirror pandas and reject unsupported arguments."""
    expected = pd.Series([10, 20, 15, 30, np.nan], name="x")
    actual = ps.Series(expected)
    self.assert_eq(actual.replace(), expected.replace())
    self.assert_eq(actual.replace({}), expected.replace({}))
    self.assert_eq(actual.replace(np.nan, 45), expected.replace(np.nan, 45))
    # List/tuple to_replace with a scalar or matching-length replacement.
    for to_replace, value in [
        ([10, 15], 45),
        ((10, 15), 45),
        ([10, 15], [45, 50]),
        ((10, 15), (45, 50)),
    ]:
        self.assert_eq(actual.replace(to_replace, value), expected.replace(to_replace, value))

    with self.assertRaisesRegex(
        TypeError, "'to_replace' should be one of str, list, tuple, dict, int, float"
    ):
        actual.replace(ps.range(5))
    with self.assertRaisesRegex(
        ValueError, "Replacement lists must match in length. Expecting 3 got 2"
    ):
        actual.replace([10, 20, 30], [1, 2])
    with self.assertRaisesRegex(NotImplementedError, "replace currently not support for regex"):
        actual.replace(r"^1.$", regex=True)
def test_xs(self):
    """Series.xs with a full-depth MultiIndex key matches pandas."""
    midx = pd.MultiIndex(
        [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    expected = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.xs(("a", "lama", "speed")), expected.xs(("a", "lama", "speed")))
def test_duplicates(self):
    """drop_duplicates with each `keep` policy should match pandas."""
    candidates = {
        "test on texts": pd.Series(
            ["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
        ),
        "test on numbers": pd.Series([1, 1, 2, 4, 3]),
    }
    for msg, pser in candidates.items():
        for keep in ["first", "last", False]:
            with self.subTest(msg, keep=keep):
                psser = ps.Series(pser)
                self.assert_eq(
                    pser.drop_duplicates(keep=keep).sort_values(),
                    psser.drop_duplicates(keep=keep).sort_values(),
                )
def test_update(self):
    """Series.update rejects non-Series arguments."""
    psser = ps.Series(pd.Series([10, 20, 15, 30, 45], name="x"))
    with self.assertRaisesRegex(TypeError, "'other' must be a Series"):
        psser.update(10)
def test_where(self):
    """Series.where keeps values satisfying the condition, like pandas."""
    expected = pd.Series([0, 1, 2, 3, 4])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.where(expected > 3), actual.where(actual > 3).sort_index())
def test_mask(self):
    """Series.mask replaces values satisfying the condition, like pandas."""
    expected = pd.Series([0, 1, 2, 3, 4])
    actual = ps.from_pandas(expected)
    self.assert_eq(expected.mask(expected > 3), actual.mask(actual > 3).sort_index())
def test_truncate(self):
    """Series.truncate on increasing and decreasing indexes, matching pandas
    (modulo a pre-1.1.0 pandas bug), plus its error conditions."""
    pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
    psser1 = ps.Series(pser1)
    pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
    psser2 = ps.Series(pser2)

    self.assert_eq(psser1.truncate(), pser1.truncate())
    self.assert_eq(psser1.truncate(before=2), pser1.truncate(before=2))
    self.assert_eq(psser1.truncate(after=5), pser1.truncate(after=5))
    self.assert_eq(psser1.truncate(copy=False), pser1.truncate(copy=False))
    self.assert_eq(psser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
    # The bug for these tests has been fixed in pandas 1.1.0.
    if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
        self.assert_eq(psser2.truncate(4, 6), pser2.truncate(4, 6))
        self.assert_eq(psser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
    else:
        # On older pandas, compare against the hand-built expectation for a
        # decreasing index instead of pandas' (buggy) result.
        expected_psser = ps.Series([20, 30, 40], index=[6, 5, 4])
        self.assert_eq(psser2.truncate(4, 6), expected_psser)
        self.assert_eq(psser2.truncate(4, 6, copy=False), expected_psser)

    # A non-monotonic index cannot be truncated.
    psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
    msg = "truncate requires a sorted index"
    with self.assertRaisesRegex(ValueError, msg):
        psser.truncate()
    # `before` must not come after `after`.
    psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
    msg = "Truncate: 2 must be after 5"
    with self.assertRaisesRegex(ValueError, msg):
        psser.truncate(5, 2)
def test_getitem(self):
    """__getitem__ by label and by boolean mask, on flat and MultiIndex Series."""
    expected = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
    actual = ps.Series(expected)
    self.assert_eq(actual["A"], expected["A"])
    self.assert_eq(actual["B"], expected["B"])
    self.assert_eq(actual[actual > 15], expected[expected > 15])

    # for MultiIndex
    midx = pd.MultiIndex(
        [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
    )
    expected = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
    actual = ps.Series(expected)
    self.assert_eq(actual["a"], expected["a"])
    self.assert_eq(actual["a", "lama"], expected["a", "lama"])
    self.assert_eq(actual[actual > 1.5], expected[expected > 1.5])

    with self.assertRaisesRegex(KeyError, r"'Key length \(4\) exceeds index depth \(3\)'"):
        actual[("a", "lama", "speed", "x")]
def test_keys(self):
    """Series.keys returns the same index as pandas."""
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    expected = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.keys(), expected.keys())
def test_index(self):
    """Name propagation between a pandas-on-Spark Series and its index."""
    # to check setting name of Index properly.
    idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
    pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
    psser = ps.from_pandas(pser)
    psser.name = "koalas"
    pser.name = "koalas"
    self.assert_eq(psser.index.name, pser.index.name)
    # for check setting names of MultiIndex properly.
    # NOTE(review): these assignments set a `names` attribute on the Series
    # objects themselves, not on their indexes (the index here is single-level
    # anyway), so the comparison below presumably sees unchanged index names —
    # confirm this is the intended check.
    psser.names = ["hello", "koalas"]
    pser.names = ["hello", "koalas"]
    self.assert_eq(psser.index.names, pser.index.names)
def test_pct_change(self):
    """Series.pct_change for various periods, on flat and MultiIndex data."""

    def check(pser):
        # One helper run per input Series; float results compared loosely.
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
        self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
        self.assert_eq(
            psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False
        )
        self.assert_eq(
            psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False
        )
        # Periods far past either end of the Series must still work.
        self.assert_eq(
            psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000)
        )
        self.assert_eq(
            psser.pct_change(periods=100000000), pser.pct_change(periods=100000000)
        )

    check(pd.Series([90, 91, 85], index=[2, 4, 1]))
    # for MultiIndex
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    check(pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx))
def test_axes(self):
    """Series.axes matches pandas for flat and MultiIndex Series."""
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    for expected in [
        pd.Series([90, 91, 85], index=[2, 4, 1]),
        pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx),
    ]:
        self.assert_eq(ps.from_pandas(expected).axes, expected.axes)
def test_udt(self):
    """Round-trip a Spark SparseVector UDT value through pandas-on-Spark."""
    values = {0: 0.1, 1: 1.1}
    expected = pd.Series([SparseVector(len(values), values)])
    self.assert_eq(ps.from_pandas(expected), expected)
def test_repeat(self):
    """Series.repeat with scalar and column-based counts matches pandas."""
    expected = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.repeat(3).sort_index(), expected.repeat(3).sort_index())
    self.assert_eq(actual.repeat(0).sort_index(), expected.repeat(0).sort_index())
    # Negative and non-integer repeat counts are rejected.
    self.assertRaises(ValueError, lambda: actual.repeat(-1))
    self.assertRaises(TypeError, lambda: actual.repeat("abc"))

    pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
    psdf = ps.from_pandas(pdf)
    self.assert_eq(psdf.a.repeat(psdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())
def test_take(self):
    """Series.take with lists and ranges (incl. negative positions) matches pandas."""
    expected = pd.Series([100, 200, 300, 400, 500], name="Koalas")
    actual = ps.from_pandas(expected)
    for positions in [[0, 2, 4], range(0, 5, 2), [-4, -2, 0], range(-2, 1, 2)]:
        self.assert_eq(
            actual.take(positions).sort_values(), expected.take(positions).sort_values()
        )
    # Checking the type of indices: only list-like positions are accepted.
    for bad in [1, "1", {1, 2}, {1: None, 2: None}]:
        self.assertRaises(TypeError, lambda: actual.take(bad))
def test_divmod(self):
    """Series.divmod matches pandas >= 1.0.0; older pandas lacks divmod, so
    compare against floordiv/mod instead."""
    expected = pd.Series([100, None, 300, None, 500], name="Koalas")
    actual = ps.from_pandas(expected)
    has_divmod = LooseVersion(pd.__version__) >= LooseVersion("1.0.0")
    for divisor in [-100, 100]:
        kdiv, kmod = actual.divmod(divisor)
        if has_divmod:
            pdiv, pmod = expected.divmod(divisor)
        else:
            pdiv, pmod = expected.floordiv(divisor), expected.mod(divisor)
        self.assert_eq(kdiv, pdiv)
        self.assert_eq(kmod, pmod)
def test_rdivmod(self):
    """Series.rdivmod matches pandas >= 1.0.0; older pandas lacks rdivmod, so
    compare against rfloordiv/rmod instead."""
    expected = pd.Series([100, None, 300, None, 500])
    actual = ps.from_pandas(expected)
    has_rdivmod = LooseVersion(pd.__version__) >= LooseVersion("1.0.0")
    for dividend in [-100, 100]:
        krdiv, krmod = actual.rdivmod(dividend)
        if has_rdivmod:
            prdiv, prmod = expected.rdivmod(dividend)
        else:
            prdiv, prmod = expected.rfloordiv(dividend), expected.rmod(dividend)
        self.assert_eq(krdiv, prdiv)
        self.assert_eq(krmod, prmod)
def test_mod(self):
    """Series.mod with scalar (incl. zero) and Series divisors matches pandas."""
    expected = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
    actual = ps.from_pandas(expected)
    for divisor in [-150, 0, 150]:
        self.assert_eq(actual.mod(divisor), expected.mod(divisor))

    pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
    psdf = ps.from_pandas(pdf)
    self.assert_eq(psdf.a.mod(psdf.b), pdf.a.mod(pdf.b))
def test_mode(self):
    """Series.mode with and without a name, plus dropna=False where available."""
    pser = pd.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
    for name in [None, "x"]:
        pser.name = name
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.mode(), pser.mode())
        if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
            # The `dropna` argument is added in pandas 0.24.
            self.assert_eq(
                psser.mode(dropna=False).sort_values().reset_index(drop=True),
                pser.mode(dropna=False).sort_values().reset_index(drop=True),
            )
def test_rmod(self):
    """Series.rmod with scalar (incl. zero) and Series dividends matches pandas."""
    expected = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
    actual = ps.from_pandas(expected)
    for dividend in [-150, 0, 150]:
        self.assert_eq(actual.rmod(dividend), expected.rmod(dividend))

    pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
    psdf = ps.from_pandas(pdf)
    self.assert_eq(psdf.a.rmod(psdf.b), pdf.a.rmod(pdf.b))
def test_asof(self):
    """Series.asof for scalar and list `where`, its error cases, and a
    DatetimeIndex."""
    pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
    psser = ps.from_pandas(pser)
    self.assert_eq(psser.asof(20), pser.asof(20))
    self.assert_eq(psser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
    self.assert_eq(psser.asof(100), pser.asof(100))
    # asof before the first entry yields NaN; compare reprs since NaN != NaN.
    self.assert_eq(repr(psser.asof(-100)), repr(pser.asof(-100)))
    self.assert_eq(psser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())

    # where cannot be an Index, Series or a DataFrame
    for where in [
        ps.Index([-100, 100]),
        ps.Series([-100, 100]),
        ps.DataFrame({"A": [1, 2, 3]}),
    ]:
        self.assertRaises(ValueError, lambda: psser.asof(where))

    # asof is not supported for a MultiIndex
    pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
    psser = ps.from_pandas(pser)
    self.assertRaises(ValueError, lambda: psser.asof(20))

    # asof requires a sorted index (More precisely, should be a monotonic increasing)
    psser = ps.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
    self.assertRaises(ValueError, lambda: psser.asof(20))
    psser = ps.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
    self.assertRaises(ValueError, lambda: psser.asof(20))

    # DatetimeIndex: string lookups, including before the first timestamp.
    pidx = pd.DatetimeIndex(["2013-12-31", "2014-01-02", "2014-01-03"])
    pser = pd.Series([1, 2, np.nan], index=pidx)
    psser = ps.from_pandas(pser)
    self.assert_eq(psser.asof("2014-01-01"), pser.asof("2014-01-01"))
    self.assert_eq(psser.asof("2014-01-02"), pser.asof("2014-01-02"))
    self.assert_eq(repr(psser.asof("1999-01-02")), repr(pser.asof("1999-01-02")))
def test_squeeze(self):
    """squeeze returns a scalar for a length-1 Series and the Series otherwise,
    for both flat and MultiIndex inputs."""
    single_midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
    multi_midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
    cases = [
        pd.Series([90]),                          # single value
        pd.Series([90], index=single_midx),       # single value with MultiIndex
        pd.Series([90, 91, 85]),                  # multiple values
        pd.Series([90, 91, 85], index=multi_midx),
    ]
    for expected in cases:
        self.assert_eq(ps.from_pandas(expected).squeeze(), expected.squeeze())
def test_swaplevel(self):
    """swaplevel by position and by name on 2- and 3-level MultiIndexes."""
    # MultiIndex with two levels
    arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
    pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
    pser = pd.Series(["a", "b", "c", "d"], index=pidx)
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.swaplevel(), psser.swaplevel())
    for i, j in [(0, 1), (1, 1), ("number", "color")]:
        self.assert_eq(pser.swaplevel(i, j), psser.swaplevel(i, j))

    # MultiIndex with more than two levels
    arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
    pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
    pser = pd.Series(["a", "b", "c", "d"], index=pidx)
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.swaplevel(), psser.swaplevel())
    for i, j in [
        (0, 1),
        (0, 2),
        (1, 2),
        (1, 1),
        (-1, -2),
        ("number", "color"),
        ("number", "size"),
        ("color", "size"),
    ]:
        self.assert_eq(pser.swaplevel(i, j), psser.swaplevel(i, j))

    # Error conditions: flat index, out-of-range level, unknown name, copy=False.
    self.assertRaises(AssertionError, lambda: ps.Series([1, 2]).swaplevel())
    self.assertRaises(IndexError, lambda: psser.swaplevel(0, 9))
    self.assertRaises(KeyError, lambda: psser.swaplevel("not_number", "color"))
    self.assertRaises(AssertionError, lambda: psser.swaplevel(copy=False))
def test_swapaxes(self):
    """swapaxes only supports the (index, index) no-op on a Series."""
    expected = pd.Series([1, 2, 3], index=["x", "y", "z"], name="ser")
    actual = ps.from_pandas(expected)
    self.assert_eq(actual.swapaxes(0, 0), expected.swapaxes(0, 0))
    self.assert_eq(actual.swapaxes("index", "index"), expected.swapaxes("index", "index"))
    self.assert_eq((actual + 1).swapaxes(0, 0), (expected + 1).swapaxes(0, 0))
    # copy=False is unsupported; a Series has no second axis to swap onto.
    self.assertRaises(AssertionError, lambda: actual.swapaxes(0, 1, copy=False))
    self.assertRaises(ValueError, lambda: actual.swapaxes(0, 1))
    self.assertRaises(ValueError, lambda: actual.swapaxes("index", "columns"))
def test_div_zero_and_nan(self):
    """Division and floor-division by zero/NaN should match pandas, including
    the floordiv-by-zero behavior change around pandas 1.0.0."""
    pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.div(0), psser.div(0))
    self.assert_eq(pser.truediv(0), psser.truediv(0))
    self.assert_eq(pser / 0, psser / 0)
    self.assert_eq(pser.div(np.nan), psser.div(np.nan))
    self.assert_eq(pser.truediv(np.nan), psser.truediv(np.nan))
    self.assert_eq(pser / np.nan, psser / np.nan)
    # floordiv has different behavior in pandas > 1.0.0 when divide by 0
    if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
        self.assert_eq(pser.floordiv(0), psser.floordiv(0))
        self.assert_eq(pser // 0, psser // 0)
    else:
        # On older pandas, compare against the expected +/-inf (NaN for
        # missing inputs) directly instead of pandas' result.
        result = pd.Series(
            [np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
        )
        self.assert_eq(psser.floordiv(0), result)
        self.assert_eq(psser // 0, result)
    self.assert_eq(pser.floordiv(np.nan), psser.floordiv(np.nan))
def test_mad(self):
    """Series.mad (mean absolute deviation) matches pandas, with and without
    NaNs and under a MultiIndex."""
    for data in [[1, 2, 3, 4], [None, -2, 5, 10, 50, np.nan, -20]]:
        expected = pd.Series(data, name="Koalas")
        self.assert_eq(expected.mad(), ps.from_pandas(expected).mad())

    pmidx = pd.MultiIndex.from_tuples(
        [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
    )
    for data in [[1, 2, 3, 4, 5], [None, -2, 5, 50, np.nan]]:
        expected = pd.Series(data, name="Koalas")
        expected.index = pmidx
        self.assert_eq(expected.mad(), ps.from_pandas(expected).mad())
def test_to_frame(self):
    """Series.to_frame(name=...) matches pandas for flat and MultiIndex Series."""
    midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
    for expected in [
        pd.Series(["a", "b", "c"]),
        pd.Series(["a", "b", "c"], index=midx),
    ]:
        actual = ps.from_pandas(expected)
        self.assert_eq(expected.to_frame(name="a"), actual.to_frame(name="a"))
def test_shape(self):
    """Series.shape matches pandas for flat and MultiIndex Series."""
    midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
    for expected in [
        pd.Series(["a", "b", "c"]),
        pd.Series(["a", "b", "c"], index=midx),
    ]:
        actual = ps.from_pandas(expected)
        self.assert_eq(expected.shape, actual.shape)
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
    """to_markdown matches pandas >= 1.0.0; unsupported before that."""
    expected = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
    actual = ps.from_pandas(expected)
    if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
        self.assert_eq(expected.to_markdown(), actual.to_markdown())
    else:
        # `to_markdown()` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
        self.assertRaises(NotImplementedError, lambda: actual.to_markdown())
def test_unstack(self):
    """unstack at every valid level (incl. negatives) matches pandas; invalid
    levels and non-MultiIndex inputs raise."""

    def check(pser, levels):
        psser = ps.from_pandas(pser)
        for level in levels:
            expected = pser.unstack(level=level)
            actual = psser.unstack(level=level).sort_index()
            self.assert_eq(expected, actual)
            self.assert_eq(expected.index.names, actual.index.names)
            self.assert_eq(expected.columns.names, actual.columns.names)
        return psser

    pser = pd.Series(
        [10, -2, 4, 7],
        index=pd.MultiIndex.from_tuples(
            [("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
            names=["A", "B", "C"],
        ),
    )
    check(pser, [-3, -2, -1, 0, 1, 2])

    # non-numeric datatypes
    pser = pd.Series(
        list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
    )
    psser = check(pser, [-2, -1, 0, 1])

    # Levels beyond the index depth raise IndexError.
    self.assertRaises(IndexError, lambda: psser.unstack(level=3))
    self.assertRaises(IndexError, lambda: psser.unstack(level=-4))
    # unstack is only defined for a MultiIndex.
    self.assertRaises(ValueError, lambda: ps.Series([10, -2, 4, 7]).unstack())
def test_item(self):
    """Series.item raises ValueError when the Series has more than one element."""
    psser = ps.Series([10, 20])
    self.assertRaises(ValueError, psser.item)
def test_filter(self):
    """Series.filter by items/regex/like, on flat and MultiIndex Series."""
    expected = pd.Series([0, 1, 2], index=["one", "two", "three"])
    actual = ps.from_pandas(expected)
    self.assert_eq(
        expected.filter(items=["one", "three"]), actual.filter(items=["one", "three"])
    )
    self.assert_eq(expected.filter(regex="e$"), actual.filter(regex="e$"))
    self.assert_eq(expected.filter(like="hre"), actual.filter(like="hre"))
    with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
        actual.filter(like="hre", axis=1)

    # for MultiIndex
    midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
    expected = pd.Series([0, 1, 2], index=midx)
    actual = ps.from_pandas(expected)
    self.assert_eq(
        expected.filter(items=[("one", "x"), ("three", "z")]),
        actual.filter(items=[("one", "x"), ("three", "z")]),
    )
    with self.assertRaisesRegex(TypeError, "Unsupported type list"):
        actual.filter(items=[["one", "x"], ("three", "z")])
    with self.assertRaisesRegex(ValueError, "The item should not be empty."):
        actual.filter(items=[(), ("three", "z")])
def test_abs(self):
    """Builtin abs() and np.abs on a Series match pandas."""
    expected = pd.Series([-2, -1, 0, 1])
    actual = ps.from_pandas(expected)
    self.assert_eq(abs(actual), abs(expected))
    self.assert_eq(np.abs(actual), np.abs(expected))
def test_bfill(self):
    """bfill fills backwards, out-of-place and in place (propagating to the
    parent DataFrame)."""
    pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
    psdf = ps.from_pandas(pdf)
    pser, psser = pdf.x, psdf.x

    self.assert_eq(psser.bfill(), pser.bfill())
    self.assert_eq(psser.bfill()[0], pser.bfill()[0])

    # In-place fill mutates the Series and is reflected in the DataFrame.
    psser.bfill(inplace=True)
    pser.bfill(inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psser[0], pser[0])
    self.assert_eq(psdf, pdf)
def test_ffill(self):
    """ffill fills forwards, out-of-place and in place (propagating to the
    parent DataFrame)."""
    pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
    psdf = ps.from_pandas(pdf)
    pser, psser = pdf.x, psdf.x

    self.assert_eq(psser.ffill(), pser.ffill())
    self.assert_eq(psser.ffill()[4], pser.ffill()[4])

    # In-place fill mutates the Series and is reflected in the DataFrame.
    psser.ffill(inplace=True)
    pser.ffill(inplace=True)
    self.assert_eq(psser, pser)
    self.assert_eq(psser[4], pser[4])
    self.assert_eq(psdf, pdf)
def test_iteritems(self):
    """iteritems yields (index, value) pairs in the same order as pandas."""
    expected = pd.Series(["A", "B", "C"])
    actual = ps.from_pandas(expected)
    for (exp_idx, exp_val), (act_idx, act_val) in zip(
        expected.iteritems(), actual.iteritems()
    ):
        self.assert_eq(exp_idx, act_idx)
        self.assert_eq(exp_val, act_val)
def test_droplevel(self):
    """Series.droplevel by position, name, negative index, list/tuple, and errors."""
    # droplevel is new in pandas 0.24.0
    if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
        pser = pd.Series(
            [1, 2, 3],
            index=pd.MultiIndex.from_tuples(
                [("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
                names=["level_1", "level_2", "level_3"],
            ),
        )
        psser = ps.from_pandas(pser)
        # Single level addressed by position, name, or negative position.
        self.assert_eq(pser.droplevel(0), psser.droplevel(0))
        self.assert_eq(pser.droplevel("level_1"), psser.droplevel("level_1"))
        self.assert_eq(pser.droplevel(-1), psser.droplevel(-1))
        # Single level wrapped in a list or tuple.
        self.assert_eq(pser.droplevel([0]), psser.droplevel([0]))
        self.assert_eq(pser.droplevel(["level_1"]), psser.droplevel(["level_1"]))
        self.assert_eq(pser.droplevel((0,)), psser.droplevel((0,)))
        self.assert_eq(pser.droplevel(("level_1",)), psser.droplevel(("level_1",)))
        # Multiple levels at once.
        self.assert_eq(pser.droplevel([0, 2]), psser.droplevel([0, 2]))
        self.assert_eq(
            pser.droplevel(["level_1", "level_3"]), psser.droplevel(["level_1", "level_3"])
        )
        self.assert_eq(pser.droplevel((1, 2)), psser.droplevel((1, 2)))
        self.assert_eq(
            pser.droplevel(("level_2", "level_3")), psser.droplevel(("level_2", "level_3"))
        )
        # Invalid arguments: unsupported container type, unknown names,
        # out-of-range positions, and attempts to drop every level.
        with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
            psser.droplevel({0, 1, 2})
        with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
            psser.droplevel(["level_1", "level_100"])
        with self.assertRaisesRegex(
            IndexError, "Too many levels: Index has only 3 levels, not 11"
        ):
            psser.droplevel(10)
        with self.assertRaisesRegex(
            IndexError,
            "Too many levels: Index has only 3 levels, -10 is not a valid level number",
        ):
            psser.droplevel(-10)
        with self.assertRaisesRegex(
            ValueError,
            "Cannot remove 3 levels from an index with 3 levels: "
            "at least one level must be left.",
        ):
            psser.droplevel([0, 1, 2])
        with self.assertRaisesRegex(
            ValueError,
            "Cannot remove 5 levels from an index with 3 levels: "
            "at least one level must be left.",
        ):
            psser.droplevel([1, 1, 1, 1, 1])
        # Tupled names
        pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.droplevel([("a", "1"), ("c", "3")]), psser.droplevel([("a", "1"), ("c", "3")])
        )
def test_dot(self):
    """Series.dot against another Series and against DataFrames should match pandas."""
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    psdf = ps.from_pandas(pdf)
    # Scale the left operand once instead of repeating the expression.
    pleft = pdf["b"] * 10
    psleft = psdf["b"] * 10
    self.assert_eq(psleft.dot(psdf["a"]), pleft.dot(pdf["a"]))
    self.assert_eq(psleft.dot(psdf), pleft.dot(pdf))
    self.assert_eq(psleft.dot(psdf + 1), pleft.dot(pdf + 1))
def test_tail(self):
    """tail() with default, positive, negative, zero, and out-of-range counts."""
    pser = pd.Series(range(1000), name="Koalas")
    psser = ps.from_pandas(pser)
    # Default argument, on the plain series and on a derived one.
    self.assert_eq(pser.tail(), psser.tail())
    self.assert_eq((pser + 1).tail(), (psser + 1).tail())
    # Explicit counts, including negatives and sizes beyond the length.
    for n in (10, -990, 0, 1001, -1001):
        self.assert_eq(pser.tail(n), psser.tail(n))
        self.assert_eq((pser + 1).tail(n), (psser + 1).tail(n))
    # A non-integer count must raise like pandas does.
    with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
        psser.tail("10")
def test_product(self):
    """Series.prod across dtypes, NA patterns, and the ``min_count`` parameter."""
    pser = pd.Series([10, 20, 30, 40, 50])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    # Containing NA values
    pser = pd.Series([10, np.nan, 30, np.nan, 50])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod(), almost=True)
    # All-NA values
    pser = pd.Series([np.nan, np.nan, np.nan])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    # Empty Series
    pser = pd.Series([])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    # Boolean Series
    pser = pd.Series([True, True, True])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    pser = pd.Series([False, False, False])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    pser = pd.Series([True, False, True])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(), psser.prod())
    # With `min_count` parameter
    pser = pd.Series([10, 20, 30, 40, 50])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(min_count=5), psser.prod(min_count=5))
    # min_count above the number of valid values yields NA.
    self.assert_eq(pser.prod(min_count=6), psser.prod(min_count=6))
    pser = pd.Series([10, np.nan, 30, np.nan, 50])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(min_count=3), psser.prod(min_count=3), almost=True)
    self.assert_eq(pser.prod(min_count=4), psser.prod(min_count=4))
    pser = pd.Series([np.nan, np.nan, np.nan])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
    pser = pd.Series([])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
    # Non-numeric dtypes must raise with an explanatory message.
    with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
        ps.Series(["a", "b", "c"]).prod()
    with self.assertRaisesRegex(
        TypeError, "Could not convert datetime64\\[ns\\] \\(timestamp\\) to numeric"
    ):
        ps.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()
def test_hasnans(self):
    """hasnans should match pandas for boolean and timestamp series."""
    cases = [
        # BooleanType, without and with an NA.
        pd.Series([True, False, True, True]),
        pd.Series([True, False, np.nan, True]),
        # TimestampType, without and with an NA.
        pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)]),
        pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")]),
    ]
    for pser in cases:
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)
def test_last_valid_index(self):
    """last_valid_index with trailing NAs, a MultiIndex, and an empty Series."""
    pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
    # MultiIndex columns
    midx = pd.MultiIndex(
        [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    pser.index = midx
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
    # Empty Series
    pser = pd.Series([])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
def test_first_valid_index(self):
    """first_valid_index on an empty Series should match pandas."""
    # Empty Series: there is no valid index at all.
    empty = pd.Series([])
    self.assert_eq(empty.first_valid_index(), ps.from_pandas(empty).first_valid_index())
def test_factorize(self):
    """Series.factorize: codes/uniques parity with pandas (sorted), NA handling,
    and the ``na_sentinel`` parameter across supported pandas versions."""
    pser = pd.Series(["a", "b", "a", "b"])
    psser = ps.from_pandas(pser)
    # pandas-on-Spark factorizes in sorted order, so compare with sort=True.
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pser = pd.Series([5, 1, 5, 1])
    psser = ps.from_pandas(pser)
    pcodes, puniques = (pser + 1).factorize(sort=True)
    kcodes, kuniques = (psser + 1).factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pser = pd.Series(["a", "b", "a", "b"], name="ser", index=["w", "x", "y", "z"])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pser = pd.Series(
        ["a", "b", "a", "b"], index=pd.MultiIndex.from_arrays([[4, 3, 2, 1], [1, 2, 3, 4]])
    )
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    #
    # Deals with None and np.nan
    #
    pser = pd.Series(["a", "b", "a", np.nan])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pser = pd.Series([1, None, 3, 2, 1])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pser = pd.Series(["a", None, "a"])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True)
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    # All-missing input: uniques are empty (dtype differs from pandas, see below).
    pser = pd.Series([None, np.nan])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize()
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes, kcodes.to_list())
    # pandas: Float64Index([], dtype='float64')
    self.assert_eq(pd.Index([]), kuniques)
    pser = pd.Series([np.nan, np.nan])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize()
    kcodes, kuniques = psser.factorize()
    self.assert_eq(pcodes, kcodes.to_list())
    # pandas: Float64Index([], dtype='float64')
    self.assert_eq(pd.Index([]), kuniques)
    #
    # Deals with na_sentinel
    #
    # pandas >= 1.1.2 support na_sentinel=None
    # pandas >= 0.24 support na_sentinel not to be -1
    #
    pd_below_1_1_2 = LooseVersion(pd.__version__) < LooseVersion("1.1.2")
    pd_below_0_24 = LooseVersion(pd.__version__) < LooseVersion("0.24")
    pser = pd.Series(["a", "b", "a", np.nan, None])
    psser = ps.from_pandas(pser)
    pcodes, puniques = pser.factorize(sort=True, na_sentinel=-2)
    kcodes, kuniques = psser.factorize(na_sentinel=-2)
    # Older pandas ignores a custom sentinel, so fall back to a literal expectation.
    self.assert_eq([0, 1, 0, -2, -2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    pcodes, puniques = pser.factorize(sort=True, na_sentinel=2)
    kcodes, kuniques = psser.factorize(na_sentinel=2)
    self.assert_eq([0, 1, 0, 2, 2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
    self.assert_eq(puniques, kuniques)
    if not pd_below_1_1_2:
        # na_sentinel=None keeps missing values as their own code/unique.
        pcodes, puniques = pser.factorize(sort=True, na_sentinel=None)
        kcodes, kuniques = psser.factorize(na_sentinel=None)
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        # puniques is Index(['a', 'b', nan], dtype='object')
        self.assert_eq(ps.Index(["a", "b", None]), kuniques)
        psser = ps.Series([1, 2, np.nan, 4, 5])  # Arrow takes np.nan as null
        psser.loc[3] = np.nan  # Spark takes np.nan as NaN
        kcodes, kuniques = psser.factorize(na_sentinel=None)
        pcodes, puniques = psser.to_pandas().factorize(sort=True, na_sentinel=None)
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)
def test_pad(self):
    """pad() (alias of ffill), with and without ``inplace``."""
    pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
    psser = ps.from_pandas(pser)
    if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
        self.assert_eq(pser.pad(), psser.pad())
        # Test `inplace=True`
        pser.pad(inplace=True)
        psser.pad(inplace=True)
        self.assert_eq(pser, psser)
    else:
        # Older pandas: compare against a precomputed forward-filled result
        # instead of pandas itself — presumably its pad() differed here;
        # TODO confirm against pandas < 1.1 behavior.
        expected = ps.Series([np.nan, 2, 3, 4, 4, 6], name="x")
        self.assert_eq(expected, psser.pad())
        # Test `inplace=True`
        psser.pad(inplace=True)
        self.assert_eq(expected, psser)
def test_explode(self):
    """Series.explode on array, empty, None, and scalar values, with plain and
    MultiIndex indexes; pandas < 0.25 has no explode, so compare to literals."""
    if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
        pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.explode(), psser.explode(), almost=True)
        # MultiIndex
        pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.explode(), psser.explode(), almost=True)
        # non-array type Series
        pser = pd.Series([1, 2, 3, 4])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.explode(), psser.explode())
    else:
        # pandas has no Series.explode before 0.25: build the expected
        # result by hand (index labels repeat per exploded element).
        pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
        psser = ps.from_pandas(pser)
        expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
        self.assert_eq(psser.explode(), expected)
        # MultiIndex
        pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
        psser = ps.from_pandas(pser)
        expected = pd.Series(
            [1.0, 2.0, 3.0, None, None, 3.0, 4.0],
            index=pd.MultiIndex.from_tuples(
                [
                    ("a", "w"),
                    ("a", "w"),
                    ("a", "w"),
                    ("b", "x"),
                    ("c", "y"),
                    ("d", "z"),
                    ("d", "z"),
                ]
            ),
        )
        self.assert_eq(psser.explode(), expected)
        # non-array type Series
        pser = pd.Series([1, 2, 3, 4])
        psser = ps.from_pandas(pser)
        # Exploding scalars is a no-op.
        expected = pser
        self.assert_eq(psser.explode(), expected)
def test_argsort(self):
    """Series.argsort parity with pandas for plain/Multi indexes, named series,
    Index- and DataFrame-derived series, with and without null values.

    Results are compared after sort_index() since pandas-on-Spark row order
    is not guaranteed.
    """
    # Without null values
    pser = pd.Series([0, -100, 50, 100, 20], index=["A", "B", "C", "D", "E"])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # MultiIndex
    pser.index = pd.MultiIndex.from_tuples(
        [("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
    )
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # With name
    pser.name = "Koalas"
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # Series from Index
    pidx = pd.Index([4.0, -6.0, 2.0, -100.0, 11.0, 20.0, 1.0, -99.0])
    psidx = ps.from_pandas(pidx)
    self.assert_eq(
        pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
    )
    self.assert_eq(
        (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
    )
    # Series from Index with name
    pidx.name = "Koalas"
    psidx = ps.from_pandas(pidx)
    self.assert_eq(
        pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
    )
    self.assert_eq(
        (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
    )
    # Series from DataFrame
    pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
    psdf = ps.from_pandas(pdf)
    self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
    self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
    # With null values
    pser = pd.Series([0, -100, np.nan, 100, np.nan], index=["A", "B", "C", "D", "E"])
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # MultiIndex with null values
    pser.index = pd.MultiIndex.from_tuples(
        [("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
    )
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # With name with null values
    pser.name = "Koalas"
    psser = ps.from_pandas(pser)
    self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
    self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
    # Series from Index with null values
    pidx = pd.Index([4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0])
    psidx = ps.from_pandas(pidx)
    self.assert_eq(
        pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
    )
    self.assert_eq(
        (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
    )
    # Series from Index with name with null values
    pidx.name = "Koalas"
    psidx = ps.from_pandas(pidx)
    self.assert_eq(
        pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
    )
    self.assert_eq(
        (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
    )
    # Series from DataFrame with null values
    pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
    psdf = ps.from_pandas(pdf)
    self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
    self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
def test_argmin_argmax(self):
    """Series.argmin/argmax parity with pandas, including ties, MultiIndex,
    all-NA, and empty input."""
    pser = pd.Series(
        {
            "Corn Flakes": 100.0,
            "Almond Delight": 110.0,
            "Cinnamon Toast Crunch": 120.0,
            "Cocoa Puff": 110.0,
            "Expensive Flakes": 120.0,
            "Cheap Flakes": 100.0,
        },
        name="Koalas",
    )
    psser = ps.from_pandas(pser)
    if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
        self.assert_eq(pser.argmin(), psser.argmin())
        self.assert_eq(pser.argmax(), psser.argmax())
        # MultiIndex
        pser.index = pd.MultiIndex.from_tuples(
            [("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
        )
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.argmin(), psser.argmin())
        self.assert_eq(pser.argmax(), psser.argmax())
        # Null Series
        self.assert_eq(pd.Series([np.nan]).argmin(), ps.Series([np.nan]).argmin())
        self.assert_eq(pd.Series([np.nan]).argmax(), ps.Series([np.nan]).argmax())
    else:
        # pandas < 1.0: Series.argmin/argmax semantics differ from 1.0+,
        # so compare against the underlying numpy positional result.
        self.assert_eq(pser.values.argmin(), psser.argmin())
        self.assert_eq(pser.values.argmax(), psser.argmax())
        # MultiIndex
        pser.index = pd.MultiIndex.from_tuples(
            [("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
        )
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.values.argmin(), psser.argmin())
        self.assert_eq(pser.values.argmax(), psser.argmax())
        # Null Series
        self.assert_eq(-1, ps.Series([np.nan]).argmin())
        self.assert_eq(-1, ps.Series([np.nan]).argmax())
    # An empty Series must raise, matching pandas' error message.
    with self.assertRaisesRegex(ValueError, "attempt to get argmin of an empty sequence"):
        ps.Series([]).argmin()
    with self.assertRaisesRegex(ValueError, "attempt to get argmax of an empty sequence"):
        ps.Series([]).argmax()
def test_backfill(self):
    """backfill() (alias of bfill), with and without ``inplace``."""
    pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
    psser = ps.from_pandas(pser)
    if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
        self.assert_eq(pser.backfill(), psser.backfill())
        # Test `inplace=True`
        pser.backfill(inplace=True)
        psser.backfill(inplace=True)
        self.assert_eq(pser, psser)
    else:
        # Older pandas: compare against a precomputed backward-filled result
        # instead of pandas itself — presumably its backfill() differed here;
        # TODO confirm against pandas < 1.1 behavior.
        expected = ps.Series([2.0, 2.0, 3.0, 4.0, 6.0, 6.0], name="x")
        self.assert_eq(expected, psser.backfill())
        # Test `inplace=True`
        psser.backfill(inplace=True)
        self.assert_eq(expected, psser)
def test_align(self):
    """Series.align against a Series and a DataFrame for every join type."""
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
    psdf = ps.from_pandas(pdf)
    for join in ["outer", "inner", "left", "right"]:
        for axis in [None, 0]:
            # Series aligned with another Series.
            psser_l, psser_r = psdf.a.align(psdf.b, join=join, axis=axis)
            pser_l, pser_r = pdf.a.align(pdf.b, join=join, axis=axis)
            self.assert_eq(psser_l, pser_l)
            self.assert_eq(psser_r, pser_r)
            # Series aligned with a DataFrame.
            psser_l, psdf_r = psdf.b.align(psdf[["b", "a"]], join=join, axis=axis)
            pser_l, pdf_r = pdf.b.align(pdf[["b", "a"]], join=join, axis=axis)
            self.assert_eq(psser_l, pser_l)
            self.assert_eq(psdf_r, pdf_r)
    # A Series has no columns axis, so axis=1 is invalid.
    self.assertRaises(ValueError, lambda: psdf.a.align(psdf.b, axis=1))
def test_pow_and_rpow(self):
    """Exponentiation with NaN operands should match pandas."""
    pser = pd.Series([1, 2, np.nan])
    psser = ps.from_pandas(pser)
    # NaN as the exponent (method and ** operator), then NaN / a literal
    # base on the left-hand side via rpow.
    pairs = [
        (pser.pow(np.nan), psser.pow(np.nan)),
        (pser ** np.nan, psser ** np.nan),
        (pser.rpow(np.nan), psser.rpow(np.nan)),
        (1 ** pser, 1 ** psser),
    ]
    for expected, actual in pairs:
        self.assert_eq(expected, actual)
def test_between_time(self):
    """between_time should match pandas regardless of the index's name."""
    idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
    pser = pd.Series([1, 2, 3, 4], index=idx)
    # Exercise an unnamed index, a named one, and the reserved name "index".
    for index_name in [None, "ts", "index"]:
        pser.index.name = index_name
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.between_time("0:15", "0:45").sort_index(),
            psser.between_time("0:15", "0:45").sort_index(),
        )
def test_at_time(self):
    """at_time should match pandas regardless of the index's name."""
    idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
    pser = pd.Series([1, 2, 3, 4], index=idx)
    # Exercise an unnamed index, a named one, and the reserved name "index".
    for index_name in [None, "ts", "index"]:
        pser.index.name = index_name
        psser = ps.from_pandas(pser)
        self.assert_eq(
            pser.at_time("0:20").sort_index(),
            psser.at_time("0:20").sort_index(),
        )
if __name__ == "__main__":
    # Re-export the test cases so unittest discovery finds them by name.
    from pyspark.pandas.tests.test_series import *  # noqa: F401
    try:
        # Use JUnit-style XML reports when xmlrunner is available (e.g. on CI).
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to unittest's default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/frame/test_operators.py | 2 | 43613 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import deque
from datetime import datetime
from decimal import Decimal
import operator
import pytest
from numpy import nan, random
import numpy as np
from pandas.compat import range
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.io.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(TestData):
def test_operators(self):
    """Element-wise frame+frame and frame+Series addition semantics."""
    garbage = random.random(4)
    colSeries = Series(garbage, index=np.array(self.frame.columns))
    idSum = self.frame + self.frame
    seriesSum = self.frame + colSeries
    # frame + frame doubles every non-NaN value; NaN stays NaN.
    for col, series in compat.iteritems(idSum):
        for idx, val in compat.iteritems(series):
            origVal = self.frame[col][idx] * 2
            if not np.isnan(val):
                assert val == origVal
            else:
                assert np.isnan(origVal)
    # frame + Series broadcasts the Series across rows by column label.
    for col, series in compat.iteritems(seriesSum):
        for idx, val in compat.iteritems(series):
            origVal = self.frame[col][idx] + colSeries[col]
            if not np.isnan(val):
                assert val == origVal
            else:
                assert np.isnan(origVal)
    added = self.frame2 + self.frame2
    expected = self.frame2 * 2
    assert_frame_equal(added, expected)
    # Object (string) columns concatenate; None propagates as NaN.
    df = DataFrame({'a': ['a', None, 'b']})
    assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
    # Test for issue #10181
    for dtype in ('float', 'int64'):
        frames = [
            DataFrame(dtype=dtype),
            DataFrame(columns=['A'], dtype=dtype),
            DataFrame(index=[0], dtype=dtype),
        ]
        for df in frames:
            # Adding an empty frame to itself must be a no-op.
            assert (df + df).equals(df)
            assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
    """Arithmetic between a DataFrame and 0-d numpy scalar arrays."""
    vals = np.random.rand(5, 3)
    scalars = [nan, 7, -23, 2.718, -3.14, np.inf]

    def make_frame(data):
        return DataFrame(data, index=list('ABCDE'),
                         columns=['jim', 'joe', 'jolie'])

    df = make_frame(vals)
    for x in scalars:
        # Frame-on-left and frame-on-right, against the raw ndarray result.
        assert_frame_equal(df / np.array(x), make_frame(vals / x))
        assert_frame_equal(np.array(x) * df, make_frame(vals * x))
        assert_frame_equal(df + np.array(x), make_frame(vals + x))
        assert_frame_equal(np.array(x) - df, make_frame(x - vals))
def test_operators_boolean(self):
    """Logical &/| on empty, aligned, and mixed-dtype frames."""
    # GH 5808
    # empty frames, non-mixed dtype
    result = DataFrame(index=[1]) & DataFrame(index=[1])
    assert_frame_equal(result, DataFrame(index=[1]))
    result = DataFrame(index=[1]) | DataFrame(index=[1])
    assert_frame_equal(result, DataFrame(index=[1]))
    # Mismatched indexes align to their union.
    result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
    assert_frame_equal(result, DataFrame(index=[1, 2]))
    result = DataFrame(index=[1], columns=['A']) & DataFrame(
        index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
    result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
        True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
    result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
        True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
    # boolean ops
    # int | bool keeps the int dtype of the left operand.
    result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
        True, index=[1], columns=['A'])
    assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
    # float | bool and str | bool are invalid and must raise TypeError.
    def f():
        DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
            True, index=[1], columns=['A'])
    pytest.raises(TypeError, f)
    def f():
        DataFrame('foo', index=[1], columns=['A']) | DataFrame(
            True, index=[1], columns=['A'])
    pytest.raises(TypeError, f)
def test_operators_none_as_na(self):
    """Arithmetic on object-dtype frames where ``None`` plays the role of NA."""
    df = DataFrame({"col1": [2, 5.0, 123, None],
                    "col2": [1, 2, 3, 4]}, dtype=object)
    ops = [operator.add, operator.sub, operator.mul, operator.truediv]
    # since filling converts dtypes from object, changed expected to be
    # object
    for op in ops:
        filled = df.fillna(np.nan)
        result = op(df, 3)
        expected = op(filled, 3).astype(object)
        # Restore None where the computation produced NA.
        expected[com.isna(expected)] = None
        assert_frame_equal(result, expected)
        result = op(df, df)
        expected = op(filled, filled).astype(object)
        expected[com.isna(expected)] = None
        assert_frame_equal(result, expected)
        # Filling only one operand: values still line up (NA propagates
        # from the unfilled side); dtype may differ when df is on the left.
        result = op(df, df.fillna(7))
        assert_frame_equal(result, expected)
        result = op(df.fillna(7), df)
        assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
    """Comparing datetime columns against ints must raise TypeError both ways."""
    def check(df, df2):
        # Every comparison operator, with the operands in both orders.
        for (x, y) in [(df, df2), (df2, df)]:
            pytest.raises(TypeError, lambda: x == y)
            pytest.raises(TypeError, lambda: x != y)
            pytest.raises(TypeError, lambda: x >= y)
            pytest.raises(TypeError, lambda: x > y)
            pytest.raises(TypeError, lambda: x < y)
            pytest.raises(TypeError, lambda: x <= y)
    # GH4968
    # invalid date/int comparisons
    df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
    df['dates'] = date_range('20010101', periods=len(df))
    df2 = df.copy()
    # Same column names, but 'dates' holds ints on one side.
    df2['dates'] = df['a']
    check(df, df2)
    df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
    df2 = DataFrame({'a': date_range('20010101', periods=len(
        df)), 'b': date_range('20100101', periods=len(df))})
    check(df, df2)
def test_timestamp_compare(self):
    """Timestamp vs. DataFrame comparisons work with the scalar on either side."""
    # make sure we can compare Timestamps on the right AND left hand side
    # GH4982
    df = DataFrame({'dates1': date_range('20010101', periods=10),
                    'dates2': date_range('20010102', periods=10),
                    'intcol': np.random.randint(1000000000, size=10),
                    'floatcol': np.random.randn(10),
                    'stringcol': list(tm.rands(10))})
    df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
    # Each operator maps to its mirror so scalar-on-left equals frame-on-left.
    ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
           'ne': 'ne'}
    for left, right in ops.items():
        left_f = getattr(operator, left)
        right_f = getattr(operator, right)
        # no nats
        expected = left_f(df, Timestamp('20010109'))
        result = right_f(Timestamp('20010109'), df)
        assert_frame_equal(result, expected)
        # nats
        expected = left_f(df, Timestamp('nat'))
        result = right_f(Timestamp('nat'), df)
        assert_frame_equal(result, expected)
def test_logical_operators(self):
    """&, |, ^ and ~ on boolean frames match the ndarray result with bool dtype."""
    def _check_bin_op(op):
        # df1/df2 are closed over from the enclosing scope (defined below).
        result = op(df1, df2)
        expected = DataFrame(op(df1.values, df2.values), index=df1.index,
                             columns=df1.columns)
        assert result.values.dtype == np.bool_
        assert_frame_equal(result, expected)
    def _check_unary_op(op):
        result = op(df1)
        expected = DataFrame(op(df1.values), index=df1.index,
                             columns=df1.columns)
        assert result.values.dtype == np.bool_
        assert_frame_equal(result, expected)
    df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': False, 'b': False, 'c': True,
                 'd': False, 'e': False},
           'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
           'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
    df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'b': {'a': False, 'b': True, 'c': False,
                 'd': False, 'e': False},
           'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
           'd': {'a': False, 'b': False, 'c': False,
                 'd': True, 'e': False},
           'e': {'a': False, 'b': False, 'c': False,
                 'd': False, 'e': True}}
    df1 = DataFrame(df1)
    df2 = DataFrame(df2)
    _check_bin_op(operator.and_)
    _check_bin_op(operator.or_)
    _check_bin_op(operator.xor)
    # operator.neg is deprecated in numpy >= 1.9
    _check_unary_op(operator.inv)
@pytest.mark.parametrize('op,res', [('__eq__', False),
                                    ('__ne__', True)])
def test_logical_typeerror_with_non_valid(self, op, res):
    """Float-vs-string comparison: == is all-False and != is all-True."""
    # we are comparing floats vs a string
    result = getattr(self.frame, op)('foo')
    assert bool(result.all().all()) is res
def test_logical_with_nas(self):
    """Logical ``|`` with NaN operands still yields a boolean Series."""
    d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
    # GH4947
    # bool comparisons should return bool
    # NaN | True is False here (NaN is not truthy on the left).
    result = d['a'] | d['b']
    expected = Series([False, True])
    assert_series_equal(result, expected)
    # GH4604, automatic casting here
    result = d['a'].fillna(False) | d['b']
    expected = Series([True, True])
    assert_series_equal(result, expected)
    # Same result when downcasting is disabled explicitly.
    result = d['a'].fillna(False, downcast=False) | d['b']
    expected = Series([True, True])
    assert_series_equal(result, expected)
@pytest.mark.parametrize('df,expected', [
    (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
    (pd.DataFrame({'a': [False, True]}),
     pd.DataFrame({'a': [True, False]})),
    (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
     pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
])
def test_neg_numeric(self, df, expected):
    """Unary ``-`` negates int, bool, and timedelta frames and columns."""
    assert_frame_equal(-df, expected)
    assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df, expected', [
    (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
    ([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]),
])
def test_neg_object(self, df, expected):
    """Unary ``-`` on object-dtype columns negates element-wise."""
    # GH 21380
    df = pd.DataFrame({'a': df})
    expected = pd.DataFrame({'a': expected})
    assert_frame_equal(-df, expected)
    assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df', [
    pd.DataFrame({'a': ['a', 'b']}),
    pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_neg_raises(self, df):
    """Unary ``-`` on string or datetime data must raise TypeError."""
    with pytest.raises(TypeError):
        (- df)
    with pytest.raises(TypeError):
        (- df['a'])
def test_invert(self):
    """``~`` on a boolean frame equals unary negation of the same mask."""
    mask = self.frame < 0
    assert_frame_equal(-mask, ~mask)
@pytest.mark.parametrize('df', [
    pd.DataFrame({'a': [-1, 1]}),
    pd.DataFrame({'a': [False, True]}),
    pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
])
def test_pos_numeric(self, df):
    """Unary ``+`` is an identity on int, bool, and timedelta frames."""
    # GH 16073
    assert_frame_equal(+df, df)
    assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
    pd.DataFrame({'a': ['a', 'b']}),
    pd.DataFrame({'a': np.array([-1, 2], dtype=object)}),
    pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}),
])
def test_pos_object(self, df):
    """Unary ``+`` is an identity on object-dtype frames."""
    # GH 21380
    assert_frame_equal(+df, df)
    assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
    pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_pos_raises(self, df):
    """Unary ``+`` on datetime data must raise TypeError."""
    with pytest.raises(TypeError):
        (+ df)
    with pytest.raises(TypeError):
        (+ df['a'])
def test_arith_flex_frame(self):
    """Flex arithmetic methods (add/sub/mul/...) and their r-variants agree
    with the corresponding operators, across float/int/mixed frames; also
    checks ndim>2 rejection, corner cases, and fill_value restrictions."""
    ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
    if not compat.PY3:
        aliases = {}
    else:
        # 'div' has no operator.div on PY3; it maps to truediv.
        aliases = {'div': 'truediv'}
    for op in ops:
        try:
            alias = aliases.get(op, op)
            f = getattr(operator, alias)
            result = getattr(self.frame, op)(2 * self.frame)
            exp = f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)
            # vs mix float
            result = getattr(self.mixed_float, op)(2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype=dict(C=None))
            # vs mix int
            if op in ['add', 'sub', 'mul']:
                result = getattr(self.mixed_int, op)(2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)
                # no overflow in the uint
                dtype = None
                if op in ['sub']:
                    dtype = dict(B='uint64', C=None)
                elif op in ['add', 'mul']:
                    dtype = dict(C=None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype=dtype)
            # rops
            r_f = lambda x, y: f(y, x)
            result = getattr(self.frame, 'r' + op)(2 * self.frame)
            exp = r_f(self.frame, 2 * self.frame)
            assert_frame_equal(result, exp)
            # vs mix float
            # NOTE(review): this section calls the plain op, not 'r' + op,
            # so it re-checks the forward method under the rops heading —
            # presumably unintentional; confirm before changing.
            result = getattr(self.mixed_float, op)(
                2 * self.mixed_float)
            exp = f(self.mixed_float, 2 * self.mixed_float)
            assert_frame_equal(result, exp)
            _check_mixed_float(result, dtype=dict(C=None))
            result = getattr(self.intframe, op)(2 * self.intframe)
            exp = f(self.intframe, 2 * self.intframe)
            assert_frame_equal(result, exp)
            # vs mix int
            if op in ['add', 'sub', 'mul']:
                result = getattr(self.mixed_int, op)(
                    2 + self.mixed_int)
                exp = f(self.mixed_int, 2 + self.mixed_int)
                # no overflow in the uint
                dtype = None
                if op in ['sub']:
                    dtype = dict(B='uint64', C=None)
                elif op in ['add', 'mul']:
                    dtype = dict(C=None)
                assert_frame_equal(result, exp)
                _check_mixed_int(result, dtype=dtype)
        except:
            # Identify which op failed before re-raising.
            printing.pprint_thing("Failing operation %r" % op)
            raise
        # ndim >= 3
        ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
        msg = "Unable to coerce to Series/DataFrame"
        with tm.assert_raises_regex(ValueError, msg):
            f(self.frame, ndim_5)
        with tm.assert_raises_regex(ValueError, msg):
            getattr(self.frame, op)(ndim_5)
        # res_add = self.frame.add(self.frame)
        # res_sub = self.frame.sub(self.frame)
        # res_mul = self.frame.mul(self.frame)
        # res_div = self.frame.div(2 * self.frame)
        # assert_frame_equal(res_add, self.frame + self.frame)
        # assert_frame_equal(res_sub, self.frame - self.frame)
        # assert_frame_equal(res_mul, self.frame * self.frame)
        # assert_frame_equal(res_div, self.frame / (2 * self.frame))
        const_add = self.frame.add(1)
        assert_frame_equal(const_add, self.frame + 1)
        # corner cases
        result = self.frame.add(self.frame[:0])
        assert_frame_equal(result, self.frame * np.nan)
        result = self.frame[:0].add(self.frame)
        assert_frame_equal(result, self.frame * np.nan)
        # fill_value is not implemented for Series operands.
        with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
            self.frame.add(self.frame.iloc[0], fill_value=3)
        with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
            self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_arith_flex_zero_len_raises(self):
    # GH#19522: fill_value must be rejected by the flex arithmetic
    # methods even when one operand is zero-length
    empty_ser = pd.Series([])
    empty_df = pd.DataFrame([], columns=['A', 'B'])
    frame = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
    with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
        frame.add(empty_ser, fill_value='E')
    with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
        empty_df.sub(frame['A'], axis=None, fill_value=3)
def test_binary_ops_align(self):
    # test aligning binary ops
    # GH 6681
    # build a 3-level MultiIndex frame to exercise level-based alignment
    index = MultiIndex.from_product([list('abc'),
                                     ['one', 'two', 'three'],
                                     [1, 2, 3]],
                                    names=['first', 'second', 'third'])
    df = DataFrame(np.arange(27 * 3).reshape(27, 3),
                   index=index,
                   columns=['value1', 'value2', 'value3']).sort_index()
    idx = pd.IndexSlice
    for op in ['add', 'sub', 'mul', 'div', 'truediv']:
        # 'div' does not exist in the operator module on Python 3
        opa = getattr(operator, op, None)
        if opa is None:
            continue
        # align on the integer-valued 'third' level
        x = Series([1.0, 10.0, 100.0], [1, 2, 3])
        result = getattr(df, op)(x, level='third', axis=0)
        expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
                              for i, v in x.iteritems()]).sort_index()
        assert_frame_equal(result, expected)
        # align on the string-valued 'second' level; 'one' is absent from
        # x, so those rows become NaN via reindex_like
        x = Series([1.0, 10.0], ['two', 'three'])
        result = getattr(df, op)(x, level='second', axis=0)
        expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
                               for i, v in x.iteritems()])
                    .reindex_like(df).sort_index())
        assert_frame_equal(result, expected)
    # GH9463 (alignment level of dataframe with series)
    midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
    df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
    s = pd.Series({'a': 1, 'b': 2})
    df2 = df.copy()
    df2.columns.names = ['lvl0', 'lvl1']
    s2 = s.copy()
    s2.index.name = 'lvl1'
    # different cases of integer/string level names:
    res1 = df.mul(s, axis=1, level=1)
    res2 = df.mul(s2, axis=1, level=1)
    res3 = df2.mul(s, axis=1, level=1)
    res4 = df2.mul(s2, axis=1, level=1)
    res5 = df2.mul(s, axis=1, level='lvl1')
    res6 = df2.mul(s2, axis=1, level='lvl1')
    exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
                    columns=midx)
    for res in [res1, res2]:
        assert_frame_equal(res, exp)
    # once the frame carries level names, the expected columns do too
    exp.columns.names = ['lvl0', 'lvl1']
    for res in [res3, res4, res5, res6]:
        assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
    # op(df, scalar)[col] must match op(df[col], scalar) for every
    # arithmetic operator, scalar on either side
    frame = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
    for direct_op in (operator.add, operator.sub, operator.mul,
                      operator.truediv, operator.floordiv, operator.pow):
        self._test_op(frame, direct_op)
    # reflected (scalar op frame) variants
    self._test_op(frame, lambda x, y: y + x)
    self._test_op(frame, lambda x, y: y - x)
    self._test_op(frame, lambda x, y: y * x)
    self._test_op(frame, lambda x, y: y / x)
    self._test_op(frame, lambda x, y: y ** x)
    # and the plain (frame op scalar) variants again via lambdas
    self._test_op(frame, lambda x, y: x + y)
    self._test_op(frame, lambda x, y: x - y)
    self._test_op(frame, lambda x, y: x * y)
    self._test_op(frame, lambda x, y: x / y)
    self._test_op(frame, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
    """Check that applying *op* frame-wise with the scalar 1 matches
    applying it column by column."""
    full = op(df, 1)
    if not df.columns.is_unique:
        raise ValueError("Only unique columns supported by this test")
    for name in full.columns:
        assert_series_equal(full[name], op(df[name], 1))
def test_bool_flex_frame(self):
    # flex comparison methods (eq/ne/gt/lt/ge/le) against frames,
    # ndarrays, scalars, Series, list-likes, and NaN-containing data
    data = np.random.randn(5, 3)
    other_data = np.random.randn(5, 3)
    df = DataFrame(data)
    other = DataFrame(other_data)
    ndim_5 = np.ones(df.shape + (1, 3))

    # Unaligned
    def _check_unaligned_frame(meth, op, df, other):
        # a partial frame must be reindexed to df's shape before the
        # elementwise comparison
        part_o = other.loc[3:, 1:].copy()
        rs = meth(part_o)
        xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
        assert_frame_equal(rs, xp)

    # DataFrame
    assert df.eq(df).values.all()
    assert not df.ne(df).values.any()
    for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
        f = getattr(df, op)
        o = getattr(operator, op)
        # No NAs
        assert_frame_equal(f(other), o(df, other))
        _check_unaligned_frame(f, o, df, other)
        # ndarray
        assert_frame_equal(f(other.values), o(df, other.values))
        # scalar
        assert_frame_equal(f(0), o(df, 0))
        # NAs
        msg = "Unable to coerce to Series/DataFrame"
        assert_frame_equal(f(np.nan), o(df, np.nan))
        # >2-dimensional operands are rejected
        with tm.assert_raises_regex(ValueError, msg):
            f(ndim_5)

    # Series
    def _test_seq(df, idx_ser, col_ser):
        # compare along the index (axis=0) and along the columns, and
        # check each flex method against the equivalent operator and
        # against its logical complement
        idx_eq = df.eq(idx_ser, axis=0)
        col_eq = df.eq(col_ser)
        idx_ne = df.ne(idx_ser, axis=0)
        col_ne = df.ne(col_ser)
        assert_frame_equal(col_eq, df == Series(col_ser))
        assert_frame_equal(col_eq, -col_ne)
        assert_frame_equal(idx_eq, -idx_ne)
        assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
        assert_frame_equal(col_eq, df.eq(list(col_ser)))
        assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
        assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
        idx_gt = df.gt(idx_ser, axis=0)
        col_gt = df.gt(col_ser)
        idx_le = df.le(idx_ser, axis=0)
        col_le = df.le(col_ser)
        assert_frame_equal(col_gt, df > Series(col_ser))
        assert_frame_equal(col_gt, -col_le)
        assert_frame_equal(idx_gt, -idx_le)
        assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
        idx_ge = df.ge(idx_ser, axis=0)
        col_ge = df.ge(col_ser)
        idx_lt = df.lt(idx_ser, axis=0)
        col_lt = df.lt(col_ser)
        assert_frame_equal(col_ge, df >= Series(col_ser))
        assert_frame_equal(col_ge, -col_lt)
        assert_frame_equal(idx_ge, -idx_lt)
        assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)

    idx_ser = Series(np.random.randn(5))
    col_ser = Series(np.random.randn(3))
    _test_seq(df, idx_ser, col_ser)
    # list/tuple
    _test_seq(df, idx_ser.values, col_ser.values)
    # NA
    # every comparison against NaN is False, even eq/ge/le on itself
    df.loc[0, 0] = np.nan
    rs = df.eq(df)
    assert not rs.loc[0, 0]
    rs = df.ne(df)
    assert rs.loc[0, 0]
    rs = df.gt(df)
    assert not rs.loc[0, 0]
    rs = df.lt(df)
    assert not rs.loc[0, 0]
    rs = df.ge(df)
    assert not rs.loc[0, 0]
    rs = df.le(df)
    assert not rs.loc[0, 0]
    # complex
    arr = np.array([np.nan, 1, 6, np.nan])
    arr2 = np.array([2j, np.nan, 7, None])
    df = DataFrame({'a': arr})
    df2 = DataFrame({'a': arr2})
    rs = df.gt(df2)
    assert not rs.values.any()
    rs = df.ne(df2)
    assert rs.values.all()
    arr3 = np.array([2j, np.nan, None])
    df3 = DataFrame({'a': arr3})
    rs = df3.gt(2j)
    assert not rs.values.any()
    # corner, dtype=object
    df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
    df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
    result = df1.ne(df2)
    exp = DataFrame({'col': [False, True, False]})
    assert_frame_equal(result, exp)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
    # flex arithmetic with a row Series (broadcast over rows) and a
    # column Series with axis=0 (broadcast over columns)
    df = self.simple
    row = df.xs('a')
    col = df['two']
    # after arithmetic refactor, add truediv here
    ops = ['add', 'sub', 'mul', 'mod']
    for op in ops:
        f = getattr(df, op)
        op = getattr(operator, op)
        assert_frame_equal(f(row), op(df, row))
        # axis=0 broadcast is equivalent to transposing, applying the
        # op, and transposing back
        assert_frame_equal(f(col, axis=0), op(df.T, col).T)
    # special case for some reason
    assert_frame_equal(df.add(row, axis=None), df + row)
    # cases which will be refactored after big arithmetic refactor
    assert_frame_equal(df.div(row), df / row)
    assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
    # broadcasting issue in GH7325
    # int64 / int64 with a zero must yield inf, not raise or truncate
    df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
    expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
    result = df.div(df[0], axis='index')
    assert_frame_equal(result, expected)
    df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
    expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
    result = df.div(df[0], axis='index')
    assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
    # arithmetic against raw ndarrays and plain lists must broadcast
    # the same way the equivalent numpy operation does
    frame = self.simple
    # 1d ndarray taken from a row
    row_vals = frame.xs('a').values
    expected = DataFrame(frame.values + row_vals,
                         index=frame.index, columns=frame.columns)
    assert_frame_equal(frame + row_vals, expected)
    expected = DataFrame((frame.values.T + row_vals).T,
                         index=frame.index, columns=frame.columns)
    assert_frame_equal(frame.add(row_vals, axis=0), expected)
    # plain list behaves like the ndarray
    col_vals = list(frame['two'])
    expected = DataFrame(frame.values + col_vals,
                         index=frame.index, columns=frame.columns)
    assert_frame_equal(frame + col_vals, expected)
    expected = DataFrame((frame.values.T + col_vals).T,
                         index=frame.index, columns=frame.columns)
    assert_frame_equal(frame.add(col_vals, axis='index'), expected)
    # full-shape 2d ndarray adds elementwise
    rand_vals = np.random.rand(*frame.shape)
    expected = DataFrame(frame.values + rand_vals,
                         index=frame.index, columns=frame.columns)
    assert_frame_equal(frame.add(rand_vals), expected)
@pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
                                    range(1, 3), deque([1, 2])])
def test_arith_alignment_non_pandas_object(self, values):
    # GH 17901: any list-like operand aligns positionally with the
    # columns, regardless of its concrete container type
    frame = DataFrame({'A': [1, 1], 'B': [1, 1]})
    expected = DataFrame({'A': [2, 2], 'B': [3, 3]})
    assert_frame_equal(frame + values, expected)
def test_combineFrame(self):
    # frame + frame aligns on both index and columns; labels present in
    # only one operand produce NaN in the result
    frame_copy = self.frame.reindex(self.frame.index[::2])
    del frame_copy['D']
    frame_copy['C'][:5] = nan
    added = self.frame + frame_copy
    # rows shared by both frames are exactly doubled ...
    indexer = added['A'].dropna().index
    exp = (self.frame['A'] * 2).copy()
    tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
    # ... and the remaining (unaligned) rows are NaN
    exp.loc[~exp.index.isin(indexer)] = np.nan
    tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
    assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
    # assert(False)
    assert np.isnan(added['D']).all()
    self_added = self.frame + self.frame
    tm.assert_index_equal(self_added.index, self.frame.index)
    # the missing 'D' column is NaN regardless of operand order
    added_rev = frame_copy + self.frame
    assert np.isnan(added['D']).all()
    assert np.isnan(added_rev['D']).all()
    # corner cases
    # empty
    plus_empty = self.frame + self.empty
    assert np.isnan(plus_empty.values).all()
    empty_plus = self.empty + self.frame
    assert np.isnan(empty_plus.values).all()
    empty_empty = self.empty + self.empty
    assert empty_empty.empty
    # out of order
    reverse = self.frame.reindex(columns=self.frame.columns[::-1])
    assert_frame_equal(reverse + self.frame, self.frame * 2)
    # mix vs float64, upcast
    added = self.frame + self.mixed_float
    _check_mixed_float(added, dtype='float64')
    added = self.mixed_float + self.frame
    _check_mixed_float(added, dtype='float64')
    # mix vs mix
    added = self.mixed_float + self.mixed_float2
    _check_mixed_float(added, dtype=dict(C=None))
    added = self.mixed_float2 + self.mixed_float
    _check_mixed_float(added, dtype=dict(C=None))
    # with int
    added = self.frame + self.mixed_int
    _check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
    # Series
    # frame + row-series broadcasts across rows, column by column
    series = self.frame.xs(self.frame.index[0])
    added = self.frame + series
    for key, s in compat.iteritems(added):
        assert_series_equal(s, self.frame[key] + series[key])
    # a series carrying an extra label introduces an all-NaN column
    larger_series = series.to_dict()
    larger_series['E'] = 1
    larger_series = Series(larger_series)
    larger_added = self.frame + larger_series
    for key, s in compat.iteritems(self.frame):
        assert_series_equal(larger_added[key], s + series[key])
    assert 'E' in larger_added
    assert np.isnan(larger_added['E']).all()
    # no upcast needed
    added = self.mixed_float + series
    _check_mixed_float(added)
    # vs mix (upcast) as needed
    added = self.mixed_float + series.astype('float32')
    _check_mixed_float(added, dtype=dict(C=None))
    added = self.mixed_float + series.astype('float16')
    _check_mixed_float(added, dtype=dict(C=None))
    # these raise with numexpr.....as we are adding an int64 to an
    # uint64....weird vs int
    # added = self.mixed_int + (100*series).astype('int64')
    # _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
    # 'int64', D = 'int64'))
    # added = self.mixed_int + (100*series).astype('int32')
    # _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
    # 'int32', D = 'int64'))
    # TimeSeries
    ts = self.tsframe['A']
    # 10890
    # we no longer allow auto timeseries broadcasting
    # and require explicit broadcasting
    added = self.tsframe.add(ts, axis='index')
    for key, col in compat.iteritems(self.tsframe):
        result = col + ts
        assert_series_equal(added[key], result, check_names=False)
        assert added[key].name == key
        # Series + Series keeps the name only when both names match
        if col.name == ts.name:
            assert result.name == 'A'
        else:
            assert result.name is None
    smaller_frame = self.tsframe[:-5]
    smaller_added = smaller_frame.add(ts, axis='index')
    tm.assert_index_equal(smaller_added.index, self.tsframe.index)
    smaller_ts = ts[:-5]
    smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
    # shrinking either operand gives the same aligned result
    assert_frame_equal(smaller_added, smaller_added2)
    # length 0, result is all-nan
    result = self.tsframe.add(ts[:0], axis='index')
    expected = DataFrame(np.nan, index=self.tsframe.index,
                         columns=self.tsframe.columns)
    assert_frame_equal(result, expected)
    # Frame is all-nan
    result = self.tsframe[:0].add(ts, axis='index')
    expected = DataFrame(np.nan, index=self.tsframe.index,
                         columns=self.tsframe.columns)
    assert_frame_equal(result, expected)
    # empty but with non-empty index
    frame = self.tsframe[:1].reindex(columns=[])
    result = frame.mul(ts, axis='index')
    assert len(result) == len(ts)
def test_combineFunc(self):
    # scalar multiplication maps straight onto the underlying ndarray
    doubled = self.frame * 2
    tm.assert_numpy_array_equal(doubled.values, self.frame.values * 2)
    # vs mix
    doubled = self.mixed_float * 2
    for col, ser in compat.iteritems(doubled):
        tm.assert_numpy_array_equal(
            ser.values, self.mixed_float[col].values * 2)
    _check_mixed_float(doubled, dtype=dict(C=None))
    # an empty frame keeps its (empty) index and columns
    doubled = self.empty * 2
    assert doubled.index is self.empty.index
    assert len(doubled.columns) == 0
def test_comparisons(self):
    # comparison operators against frames, >2-dim ndarrays, a row
    # Series, scalars, and misaligned frames
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame()
    row = self.simple.xs('a')
    ndim_5 = np.ones(df1.shape + (1, 1, 1))

    def test_comp(func):
        # frame vs frame matches the raw ndarray comparison
        result = func(df1, df2)
        tm.assert_numpy_array_equal(result.values,
                                    func(df1.values, df2.values))
        # >2-dimensional operands are rejected
        with tm.assert_raises_regex(ValueError,
                                    'Wrong number of dimensions'):
            func(df1, ndim_5)
        # frame vs row Series broadcasts across rows
        result2 = func(self.simple, row)
        tm.assert_numpy_array_equal(result2.values,
                                    func(self.simple.values, row.values))
        # frame vs scalar
        result3 = func(self.frame, 0)
        tm.assert_numpy_array_equal(result3.values,
                                    func(self.frame.values, 0))
        # misaligned frames cannot be compared
        with tm.assert_raises_regex(ValueError,
                                    'Can only compare identically'
                                    '-labeled DataFrame'):
            func(self.simple, self.simple[:2])

    test_comp(operator.eq)
    test_comp(operator.ne)
    test_comp(operator.lt)
    test_comp(operator.gt)
    test_comp(operator.ge)
    test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
    # DataFrame comparisons must suppress floating-point invalid-value
    # errors internally, even under a raising errstate
    missing_df = tm.makeDataFrame()
    missing_df.iloc[0]['A'] = np.nan
    with np.errstate(invalid='ignore'):
        baseline = missing_df.values < 0
    with np.errstate(invalid='raise'):
        under_raise = (missing_df < 0).values
    tm.assert_numpy_array_equal(under_raise, baseline)
def test_boolean_comparison(self):
    # GH 4576
    # boolean comparisons with a tuple/list give unexpected results
    df = DataFrame(np.arange(6).reshape((3, 2)))
    b = np.array([2, 2])
    b_r = np.atleast_2d([2, 2])  # row vector, shape (1, 2)
    b_c = b_r.T                  # column vector, shape (2, 1)
    l = (2, 2, 2)
    tup = tuple(l)
    # gt
    expected = DataFrame([[False, False], [False, True], [True, True]])
    result = df > b
    assert_frame_equal(result, expected)
    result = df.values > b
    assert_numpy_array_equal(result, expected.values)
    result = df > l
    assert_frame_equal(result, expected)
    result = df > tup
    assert_frame_equal(result, expected)
    result = df > b_r
    assert_frame_equal(result, expected)
    result = df.values > b_r
    assert_numpy_array_equal(result, expected.values)
    # a (2, 1) column vector cannot broadcast against the (3, 2) frame
    pytest.raises(ValueError, df.__gt__, b_c)
    pytest.raises(ValueError, df.values.__gt__, b_c)
    # ==
    expected = DataFrame([[False, False], [True, False], [False, False]])
    result = df == b
    assert_frame_equal(result, expected)
    result = df == l
    assert_frame_equal(result, expected)
    result = df == tup
    assert_frame_equal(result, expected)
    result = df == b_r
    assert_frame_equal(result, expected)
    result = df.values == b_r
    assert_numpy_array_equal(result, expected.values)
    pytest.raises(ValueError, lambda: df == b_c)
    assert df.values.shape != b_c.shape
    # with alignment
    # labelled axes must not change the positional comparison semantics
    df = DataFrame(np.arange(6).reshape((3, 2)),
                   columns=list('AB'), index=list('abc'))
    expected.index = df.index
    expected.columns = df.columns
    result = df == l
    assert_frame_equal(result, expected)
    result = df == tup
    assert_frame_equal(result, expected)
def test_combine_generic(self):
    # DataFrame.combine with np.add on partially overlapping frames
    full = self.frame
    partial = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
    fwd = full.combine(partial, np.add)
    rev = partial.combine(full, np.add)
    # 'D' is missing from ``partial`` -> all-NaN in both directions
    assert fwd['D'].isna().all()
    assert rev['D'].isna().all()
    # on the overlap, combine(np.add) doubles the original values
    overlap_fwd = fwd.loc[fwd.index[:-5], ['A', 'B', 'C']]
    overlap_rev = rev.loc[rev.index[:-5], ['A', 'B', 'C']]
    doubled = self.frame.loc[self.frame.index[:-5],
                             ['A', 'B', 'C']].reindex_like(overlap_fwd) * 2
    assert_frame_equal(overlap_fwd, doubled)
    assert_frame_equal(overlap_rev, doubled)
def test_inplace_ops_alignment(self):
    # inplace ops / ops alignment
    # GH 8511
    columns = list('abcdefg')
    X_orig = DataFrame(np.arange(10 * len(columns))
                       .reshape(-1, len(columns)),
                       columns=columns, index=range(10))
    Z = 100 * X_orig.iloc[:, 1:-1].copy()
    block1 = list('bedcf')  # deliberately out of order vs Z's columns
    subs = list('bcdef')
    # add
    # four routes to the same result: out-of-place vs +=, and Z taken
    # whole vs pre-selected with block1's column order
    X = X_orig.copy()
    result1 = (X[block1] + Z).reindex(columns=subs)
    X[block1] += Z
    result2 = X.reindex(columns=subs)
    X = X_orig.copy()
    result3 = (X[block1] + Z[block1]).reindex(columns=subs)
    X[block1] += Z[block1]
    result4 = X.reindex(columns=subs)
    assert_frame_equal(result1, result2)
    assert_frame_equal(result1, result3)
    assert_frame_equal(result1, result4)
    # sub
    X = X_orig.copy()
    result1 = (X[block1] - Z).reindex(columns=subs)
    X[block1] -= Z
    result2 = X.reindex(columns=subs)
    X = X_orig.copy()
    result3 = (X[block1] - Z[block1]).reindex(columns=subs)
    X[block1] -= Z[block1]
    result4 = X.reindex(columns=subs)
    assert_frame_equal(result1, result2)
    assert_frame_equal(result1, result3)
    assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
    # GH 5104
    # make sure that we are actually changing the object
    s_orig = Series([1, 2, 3])
    df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
    # no dtype change
    s = s_orig.copy()
    s2 = s
    s += 1
    assert_series_equal(s, s2)
    assert_series_equal(s_orig + 1, s)
    # += must mutate in place: same object, same block manager
    assert s is s2
    assert s._data is s2._data
    df = df_orig.copy()
    df2 = df
    df += 1
    assert_frame_equal(df, df2)
    assert_frame_equal(df_orig + 1, df)
    assert df is df2
    assert df._data is df2._data
    # dtype change
    # int + float forces an upcast but identity is still preserved
    s = s_orig.copy()
    s2 = s
    s += 1.5
    assert_series_equal(s, s2)
    assert_series_equal(s_orig + 1.5, s)
    df = df_orig.copy()
    df2 = df
    df += 1.5
    assert_frame_equal(df, df2)
    assert_frame_equal(df_orig + 1.5, df)
    assert df is df2
    assert df._data is df2._data
    # mixed dtype
    arr = np.random.randint(0, 10, size=5)
    df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
    df = df_orig.copy()
    df2 = df
    df['A'] += 1
    expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
    assert_frame_equal(df, expected)
    assert_frame_equal(df2, expected)
    assert df._data is df2._data
    df = df_orig.copy()
    df2 = df
    df['A'] += 1.5
    expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
    assert_frame_equal(df, expected)
    assert_frame_equal(df2, expected)
    assert df._data is df2._data
@pytest.mark.parametrize('op', ['add', 'and', 'div', 'floordiv', 'mod',
                                'mul', 'or', 'pow', 'sub', 'truediv',
                                'xor'])
def test_inplace_ops_identity2(self, op):
    """Each ``__iXXX__`` dunder must mutate the frame in place (same
    object id) and produce the same values as the out-of-place op."""
    if compat.PY3 and op == 'div':
        # __div__ does not exist on Python 3
        return
    df = DataFrame({'a': [1., 2., 3.],
                    'b': [1, 2, 3]})
    operand = 2
    if op in ('and', 'or', 'xor'):
        # cannot use floats for boolean ops
        df['a'] = [True, False, True]
    df_copy = df.copy()
    iop = '__i{}__'.format(op)
    op = '__{}__'.format(op)
    # BUG FIX: capture the identity BEFORE the in-place call; previously
    # id(df) was taken afterwards, making the identity assertion a
    # tautology that could never fail
    id_before = id(df)
    # no id change and value is correct
    getattr(df, iop)(operand)
    expected = getattr(df_copy, op)(operand)
    assert_frame_equal(df, expected)
    assert id(df) == id_before
def test_alignment_non_pandas(self):
    # _align_method_FRAME coerces list-likes to Series and 2d ndarrays
    # to DataFrame, validating lengths and shapes along the way
    index = ['A', 'B', 'C']
    columns = ['X', 'Y', 'Z']
    df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
    align = pd.core.ops._align_method_FRAME
    for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64),
                range(1, 4)]:
        tm.assert_series_equal(align(df, val, 'index'),
                               Series([1, 2, 3], index=df.index))
        tm.assert_series_equal(align(df, val, 'columns'),
                               Series([1, 2, 3], index=df.columns))
    # length mismatch
    msg = 'Unable to coerce to Series, length must be 3: given 2'
    for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
        with tm.assert_raises_regex(ValueError, msg):
            align(df, val, 'index')
        with tm.assert_raises_regex(ValueError, msg):
            align(df, val, 'columns')
    # a full-shape 2d array is wrapped into a frame with df's labels
    val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    tm.assert_frame_equal(align(df, val, 'index'),
                          DataFrame(val, index=df.index,
                                    columns=df.columns))
    tm.assert_frame_equal(align(df, val, 'columns'),
                          DataFrame(val, index=df.index,
                                    columns=df.columns))
    # shape mismatch
    msg = 'Unable to coerce to DataFrame, shape must be'
    val = np.array([[1, 2, 3], [4, 5, 6]])
    with tm.assert_raises_regex(ValueError, msg):
        align(df, val, 'index')
    with tm.assert_raises_regex(ValueError, msg):
        align(df, val, 'columns')
    # ndim > 2 is rejected outright
    val = np.zeros((3, 3, 3))
    with pytest.raises(ValueError):
        align(df, val, 'index')
    with pytest.raises(ValueError):
        align(df, val, 'columns')
| bsd-3-clause |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
# Python 2 style unpack of the interpreter version tuple
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
    """Decode *s* to unicode using the locale's preferred encoding,
    falling back to the default conversion when the preferred encoding
    is unknown.  Python 2 only: relies on the ``unicode`` builtin."""
    if preferredencoding is None: return unicode(s)
    else: return unicode(s, preferredencoding)
class converter:
    """
    Base class for string -> python type conversion with support for
    missing values.

    *missing* is the sentinel string that marks a missing value and
    *missingval* is the python object substituted for it.
    """
    def __init__(self, missing='Null', missingval=None):
        self.missing = missing
        self.missingval = missingval

    def __call__(self, s):
        # exact sentinel match only; whitespace-only strings pass
        # through here (unlike is_missing, which treats them as missing)
        if s == self.missing:
            return self.missingval
        return s

    def is_missing(self, s):
        # blank/whitespace-only strings also count as missing
        return not s.strip() or s == self.missing
class tostr(converter):
    'convert to string or None'
    # identity conversion; the only twist vs the base class is that a
    # missing value maps to the empty string rather than None
    def __init__(self, missing='Null', missingval=''):
        converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
    'convert to a datetime or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        # missing sentinel (or blank string) -> missingval
        if self.is_missing(s): return self.missingval
        # strptime yields a 9-tuple; the first six entries are
        # year..second, exactly what datetime.datetime wants
        tup = time.strptime(s, self.fmt)
        return datetime.datetime(*tup[:6])
class todate(converter):
    'convert to a date or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        # missing sentinel (or blank string) -> missingval
        if self.is_missing(s): return self.missingval
        # first three strptime fields are year, month, day
        tup = time.strptime(s, self.fmt)
        return datetime.date(*tup[:3])
class tofloat(converter):
    'convert to a float or None'
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        # stored explicitly because the base-class call above was not
        # given the missingval argument
        self.missingval = missingval

    def __call__(self, s):
        if self.is_missing(s): return self.missingval
        return float(s)
class toint(converter):
    'convert to an int or None'
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        # BUG FIX: store missingval (mirrors tofloat).  Previously the
        # parameter was silently dropped, so a caller-supplied
        # missingval never took effect and missing values always
        # converted to the base-class default (None).
        self.missingval = missingval

    def __call__(self, s):
        if self.is_missing(s): return self.missingval
        return int(s)
class CallbackRegistry:
    """
    Registry connecting named signals to callback functions.

    Only the signal names passed at construction are valid; connecting
    to or processing any other name raises :exc:`ValueError`.

    Example::

        callbacks = CallbackRegistry(('eat', 'drink'))
        cid = callbacks.connect('eat', on_eat)   # register on_eat
        callbacks.process('eat', 123)            # calls on_eat(123)
        callbacks.disconnect(cid)                # on_eat no longer called
    """
    def __init__(self, signals):
        '*signals* is a sequence of valid signals'
        self.signals = set(signals)
        # per-signal mapping of callback id -> callable
        self.callbacks = dict((s, dict()) for s in signals)
        # monotonically increasing id handed out by connect()
        self._cid = 0

    def _check_signal(self, s):
        'make sure *s* is a valid signal or raise a ValueError'
        if s in self.signals:
            return
        signals = sorted(self.signals)
        raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))

    def connect(self, s, func):
        """
        Register *func* to be called whenever signal *s* is processed;
        return the callback id usable with :meth:`disconnect`.
        """
        self._check_signal(s)
        self._cid += 1
        self.callbacks[s][self._cid] = func
        return self._cid

    def disconnect(self, cid):
        """
        Disconnect the callback registered with callback id *cid*.
        Unknown ids are silently ignored.
        """
        for callbackd in self.callbacks.values():
            if cid in callbackd:
                del callbackd[cid]
                return

    def process(self, s, *args, **kwargs):
        """
        Process signal *s*: every function registered on *s* is invoked
        with *args* and **kwargs.
        """
        self._check_signal(s)
        for func in self.callbacks[s].values():
            func(*args, **kwargs)
class Scheduler(threading.Thread):
    """
    Base class for timeout and idle scheduling
    """
    # lock shared by all scheduler instances so their callbacks never
    # run concurrently
    idlelock = threading.Lock()
    # class-level counter handing out a unique id per instance
    id = 0

    def __init__(self):
        threading.Thread.__init__(self)
        self.id = Scheduler.id
        self._stopped = False
        Scheduler.id += 1
        # subclasses poll this event in their run() loop
        self._stopevent = threading.Event()

    def stop(self):
        # signal the run loop to exit and wait for the thread to finish;
        # safe to call more than once
        if self._stopped: return
        self._stopevent.set()
        self.join()
        self._stopped = True
class Timeout(Scheduler):
    """
    Schedule recurring events with a wait time in seconds
    """
    def __init__(self, wait, func):
        Scheduler.__init__(self)
        self.wait = wait
        self.func = func

    def run(self):
        # call func(self) every *wait* seconds (or immediately when the
        # stop event fires); a false return from func ends the loop
        while not self._stopevent.isSet():
            self._stopevent.wait(self.wait)
            # the shared idlelock serializes callbacks across schedulers
            Scheduler.idlelock.acquire()
            b = self.func(self)
            Scheduler.idlelock.release()
            if not b: break
class Idle(Scheduler):
    """
    Schedule callbacks when scheduler is idle
    """
    # the prototype impl is a bit of a poor man's idle handler.  It
    # just implements a short wait time.  But it will provide a
    # placeholder for a proper impl later
    waittime = 0.05

    def __init__(self, func):
        Scheduler.__init__(self)
        self.func = func

    def run(self):
        # poll every waittime seconds; a false return from func(self)
        # ends the loop
        while not self._stopevent.isSet():
            self._stopevent.wait(Idle.waittime)
            # the shared idlelock serializes callbacks across schedulers
            Scheduler.idlelock.acquire()
            b = self.func(self)
            Scheduler.idlelock.release()
            if not b: break
class silent_list(list):
    """
    A list of matplotlib artists whose repr is a short summary instead
    of the usual long, meaningless element dump.  Meant for a
    homogeneous list of a given *type*.
    """
    def __init__(self, type, seq=None):
        self.type = type
        if seq is not None:
            self.extend(seq)

    def __repr__(self):
        return '<a list of %d %s objects>' % (len(self), self.type)

    # str() shows exactly the same summary
    __str__ = __repr__
def strip_math(s):
    'remove latex formatting from mathtext'
    # drop the surrounding $...$ delimiters, then delete the known TeX
    # markup tokens one by one
    markup = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it',
              '\\', '{', '}')
    stripped = s[1:-1]
    for token in markup:
        stripped = stripped.replace(token, '')
    return stripped
class Bunch:
    """
    Collect a bunch of named items into one object; attribute access is
    handier and prettier than dict indexing when grouping a few
    variables:

    >>> point = Bunch(datum=2, squared=4, coord=12)
    >>> point.datum

    By: Alex Martelli
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
    """
    def __init__(self, **kwds):
        # every keyword becomes an instance attribute
        for name, value in kwds.items():
            setattr(self, name, value)
def unique(x):
    'Return a list of unique elements of *x*'
    # route the elements through a dict so each one appears once
    return dict.fromkeys(x).keys()
def iterable(obj):
    """Return True if *obj* is iterable (probed via ``len``).

    Note: sized containers and strings qualify; unsized iterables such
    as generators report False here, because the probe is ``len(obj)``.
    """
    try:
        len(obj)
    except Exception:
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; any ordinary failure still means "not
        # iterable" for this helper's purposes
        return False
    return True
def is_string_like(obj):
    'Return True if *obj* looks like a string'
    # fast path: real byte or unicode strings (Python 2 only: relies on
    # the ``unicode`` builtin)
    if isinstance(obj, (str, unicode)): return True
    # numpy strings are subclass of str, ma strings are not
    if ma.isMaskedArray(obj):
        if obj.ndim == 0 and obj.dtype.kind in 'SU':
            return True
        else:
            return False
    # duck-typing fallback: anything that concatenates with '' passes
    try: obj + ''
    except (TypeError, ValueError): return False
    return True
def is_sequence_of_strings(obj):
    """
    Return True if *obj* is iterable and every element is string-like.
    A bare string itself does not qualify.
    """
    if not iterable(obj):
        return False
    if is_string_like(obj):
        return False
    return all(is_string_like(o) for o in obj)
def is_writable_file_like(obj):
    'return true if *obj* looks like a file object with a *write* method'
    # getattr with a None default folds the hasattr check into one call
    return callable(getattr(obj, 'write', None))
def is_scalar(obj):
    'return true if *obj* is not string like and is not iterable'
    if is_string_like(obj):
        return False
    return not iterable(obj)
def is_numlike(obj):
    'return true if *obj* looks like a number'
    # duck-typing probe: numbers support addition with an int
    try:
        obj + 1
    except TypeError:
        return False
    return True
def to_filehandle(fname, flag='r', return_opened=False):
    """
    *fname* can be a filename or a file handle.  Support for gzipped
    files is automatic, if the filename ends in .gz.  *flag* is a
    read/write flag for :func:`file`

    If *return_opened* is True, also return a bool telling whether this
    call opened the handle (True for a filename, False for an existing
    handle).  Python 2 only: uses the ``file`` builtin.
    """
    if is_string_like(fname):
        if fname.endswith('.gz'):
            # imported lazily so gzip is only paid for when needed
            import gzip
            fh = gzip.open(fname, flag)
        else:
            fh = file(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # anything seekable is assumed to already be a usable handle
        fh = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fh, opened
    return fh
def is_scalar_or_string(val):
    'Return True if *val* is a string or a non-iterable scalar'
    if is_string_like(val):
        return True
    return not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
    """
    this generator flattens nested containers such as

    >>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])

    so that

    >>> for i in flatten(l): print i,
    John Hunter 1 23 42 5 23

    *scalarp* decides what counts as a leaf; by default strings are
    leaves even though they are iterable.

    By: Composite of Holger Krekel and Luther Blissett
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
    and Recipe 1.12 in cookbook
    """
    for item in seq:
        if scalarp(item): yield item
        else:
            # recurse into nested containers, preserving element order
            for subitem in flatten(item, scalarp):
                yield subitem
class Sorter:
    """
    Sort a list of items by index or by attribute.

    Example usage::

        sort = Sorter()
        list = [(1, 2), (4, 8), (0, 3)]
        dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
                {'a': 9, 'b': 9}]
        sort(list)       # default sort
        sort(list, 1)    # sort by index 1
        sort(dict, 'a')  # sort a list of dicts by key 'a'
    """
    def _helper(self, data, aux, inplace):
        # aux is a list of (sort key, original position) pairs
        aux.sort()
        result = [data[pos] for _, pos in aux]
        if inplace:
            data[:] = result
        return result

    def byItem(self, data, itemindex=None, inplace=1):
        if itemindex is None:
            # plain sort on the elements themselves
            if inplace:
                data.sort()
                return data
            return sorted(data)
        aux = [(item[itemindex], pos) for pos, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    def byAttribute(self, data, attributename, inplace=1):
        aux = [(getattr(item, attributename), pos)
               for pos, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    # a couple of handy synonyms
    sort = byItem
    __call__ = byItem
class Xlator(dict):
    """
    All-in-one multiple-string-substitution class: a dict whose keys
    are the substrings to replace and whose values are the
    replacements.

    Example usage::

        adict = {
            "Larry Wall" : "Guido van Rossum",
            "creator" : "Benevolent Dictator for Life",
            "Perl" : "Python",
        }
        xlat = Xlator(adict)
        print xlat.xlat(text)
    """
    def _make_regex(self):
        """ Build re object based on the keys of the current dictionary """
        # keys are escaped so they match literally, then or'ed together
        return re.compile("|".join(re.escape(key) for key in self.keys()))

    def __call__(self, match):
        """ Handler invoked for each regex *match* """
        # map the matched substring to its replacement
        return self[match.group(0)]

    def xlat(self, text):
        """ Translate *text*, returns the modified text. """
        return self._make_regex().sub(self, text)
def soundex(name, len=4):
    """ soundex module conforming to Odell-Russell algorithm """
    # soundex value for each letter A-Z ('0' marks vowels and letters
    # that are dropped from the code)
    soundex_digits = '01230120022455012623010202'
    first_letter = ''
    code = ''
    # translate letters in name to soundex digits
    for ch in name.upper():
        if not ch.isalpha():
            continue
        if not first_letter:
            first_letter = ch  # Remember first letter
        digit = soundex_digits[ord(ch) - ord('A')]
        # duplicate consecutive soundex digits are skipped
        if not code or digit != code[-1]:
            code = code + digit
    # the first digit is replaced by the first letter itself
    code = first_letter + code[1:]
    # vowels and dropped letters contribute nothing
    code = code.replace('0', '')
    # pad with zeros / truncate to the requested length
    return (code + (len * '0'))[:len]
class Null:
    """ Null objects always and reliably "do nothing." """
    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return self

    def __str__(self):
        return "Null()"

    # repr shows exactly the same text
    __repr__ = __str__

    def __nonzero__(self):
        # Python 2 truth value: a Null is always falsy
        return 0

    def __getattr__(self, name):
        return self

    def __setattr__(self, name, value):
        # deliberately discards the assignment
        return self

    def __delattr__(self, name):
        return self
def mkdirs(newdir, mode=0777):
    """
    make directory *newdir* recursively, and set *mode*.  Equivalent to ::

        > mkdir -p NEWDIR
        > chmod MODE NEWDIR

    Raises :exc:`OSError` on failure, except when the failure is that
    the directory already exists.
    """
    # NOTE: Python 2 only syntax below (octal literal 0777 and
    # "except OSError, err").
    try:
        if not os.path.exists(newdir):
            # Build the path one component at a time.  os.makedirs is
            # already recursive; presumably the loop tolerates races where
            # another process creates intermediate parts -- TODO confirm.
            parts = os.path.split(newdir)
            for i in range(1, len(parts)+1):
                thispart = os.path.join(*parts[:i])
                if not os.path.exists(thispart):
                    os.makedirs(thispart, mode)
    except OSError, err:
        # Reraise the error unless it's about an already existing directory
        if err.errno != errno.EEXIST or not os.path.isdir(newdir):
            raise
class GetRealpathAndStat:
    """
    Memoized callable mapping a path to ``(realpath, stat_key)``.

    On win32 the stat key is the resolved path itself; elsewhere it is
    the ``(st_ino, st_dev)`` pair, which identifies the file uniquely.
    Results are cached per input *path*.
    """
    def __init__(self):
        self._cache = {}
    def __call__(self, path):
        try:
            return self._cache[path]
        except KeyError:
            pass
        realpath = os.path.realpath(path)
        if sys.platform == 'win32':
            stat_key = realpath
        else:
            st = os.stat(realpath)
            stat_key = (st.st_ino, st.st_dev)
        entry = (realpath, stat_key)
        self._cache[path] = entry
        return entry
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
    'delete all of the *keys* from the :class:`dict` *d*'
    # pop with a default ignores keys that are absent, just like the
    # old try/del/except KeyError dance
    for key in keys:
        d.pop(key, None)
class RingBuffer:
    """ class that implements a not-yet-full buffer

    Elements are appended until *size_max* is reached; from then on new
    elements overwrite the oldest ones (the instance's class is swapped
    to the internal __Full implementation).
    """
    def __init__(self, size_max):
        self.max = size_max   # capacity of the buffer
        self.data = []
    class __Full:
        """ class that implements a full buffer """
        def append(self, x):
            """ Append an element overwriting the oldest one. """
            self.data[self.cur] = x
            self.cur = (self.cur+1) % self.max
        def get(self):
            """ return list of elements in correct order """
            return self.data[self.cur:]+self.data[:self.cur]
    def append(self, x):
        """append an element at the end of the buffer"""
        self.data.append(x)
        if len(self.data) == self.max:
            self.cur = 0
            # Permanently change self's class from non-full to full.
            # Bug fix: the original assigned the bare name __Full, which
            # private-name mangling compiles to the nonexistent global
            # _RingBuffer__Full, raising NameError the moment the buffer
            # filled.  Resolving through self finds the nested class.
            self.__class__ = self.__Full
    def get(self):
        """ Return a list of elements from the oldest to the newest. """
        return self.data
    def __getitem__(self, i):
        # Bug fix: was misspelled __get_item__, so indexing never worked.
        return self.data[i % len(self.data)]
    __get_item__ = __getitem__  # backward-compatible alias for old callers
def get_split_ind(seq, N):
    """
    *seq* is a list of words.  Return the index into seq such that::

        len(' '.join(seq[:ind])<=N
    """
    running = 0
    # todo: use Alex's xrange pattern from the cbook for efficiency
    for idx, word in enumerate(seq):
        running += len(word) + 1  # +1 to account for the len(' ')
        if running >= N:
            return idx
    return len(seq)
def wrap(prefix, text, cols):
    'wrap *text* with *prefix* at length *cols*'
    pad = ' ' * len(prefix.expandtabs())
    available = cols - len(pad)
    words = text.split(' ')
    total = len(words)
    pos = 0
    chunks = []
    while pos < total:
        start = pos
        pos += get_split_ind(words[pos:], available)
        chunks.append(words[start:pos])
    # the prefix goes on the first line; the rest are padded with spaces
    out = [prefix + ' '.join(chunks[0])]
    for chunk in chunks[1:]:
        out.append(pad + ' '.join(chunk))
    return '\n'.join(out) + '\n'
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
def listFiles(root, patterns='*', recurse=1, return_folders=0):
    """
    Recursively list files

    from Parmar and Martelli in the Python Cookbook

    *root*: directory to search; *patterns*: semicolon-separated fnmatch
    patterns; *recurse*: descend into subdirectories if true;
    *return_folders*: also return matching directories if true.
    Returns a list of normalized full paths.
    """
    # Bug fix / modernization: os.path.walk was removed in Python 3;
    # os.walk exists since Python 2.3 and provides the same traversal.
    import os
    import os.path
    import fnmatch
    # Expand patterns from semicolon-separated string to list
    pattern_list = patterns.split(';')
    results = []
    for dirpath, dirnames, filenames in os.walk(root):
        # both files and (optionally) folders are candidates
        for name in filenames + dirnames:
            fullname = os.path.normpath(os.path.join(dirpath, name))
            if return_folders or os.path.isfile(fullname):
                if any(fnmatch.fnmatch(name, pattern)
                       for pattern in pattern_list):
                    results.append(fullname)
        # Block recursion if recursion was disallowed
        if not recurse:
            del dirnames[:]
    return results
def get_recursive_filelist(args):
    """
    Recurs all the files and dirs in *args* ignoring symbolic links
    and return the files as a list of strings
    """
    collected = []
    for path in args:
        if os.path.isfile(path):
            collected.append(path)
        elif os.path.isdir(path):
            # folders are expanded recursively, including sub-folders
            collected.extend(listFiles(path, recurse=1, return_folders=1))
    # symbolic links are filtered out at the end
    return [name for name in collected if not os.path.islink(name)]
def pieces(seq, num=2):
    "Break up the *seq* into *num* tuples"
    offset = 0
    while True:
        chunk = seq[offset:offset + num]
        # an empty slice means we ran off the end
        if not len(chunk):
            break
        yield chunk
        offset += num
def exception_to_str(s = None):
    """Return the current traceback as a string, preceded by *s* if given.

    Must be called while an exception is being handled (inside an
    ``except`` block) for the traceback to be meaningful.
    """
    # NOTE: Python 2 only ("print >>" syntax and the StringIO module).
    sh = StringIO.StringIO()
    if s is not None: print >>sh, s
    traceback.print_exc(file=sh)
    return sh.getvalue()
def allequal(seq):
    """
    Return *True* if all elements of *seq* compare equal.  If *seq* is
    0 or 1 length, return *True*
    """
    if len(seq) < 2:
        return True
    first = seq[0]
    # Fix: the original used xrange, which does not exist on Python 3.
    # all() short-circuits on the first mismatch just like the old loop.
    return all(item == first for item in seq[1:])
def alltrue(seq):
    """
    Return *True* if all elements of *seq* evaluate to *True*.  If
    *seq* is empty, return *False*.
    """
    # unlike the builtin all(), an empty sequence yields False here
    if not len(seq):
        return False
    return all(seq)
def onetrue(seq):
    """
    Return *True* if one element of *seq* is *True*.  It *seq* is
    empty, return *False*.
    """
    # empty input is explicitly False (same as the builtin any(), but
    # the length check preserves the original's contract for sized input)
    if not len(seq):
        return False
    return any(seq)
def allpairs(x):
    """
    return all possible pairs in sequence *x*

    Condensed by Alex Martelli from this thread_ on c.l.python

    .. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
    """
    pairs = []
    # note the (later, earlier) ordering of each emitted pair
    for i, earlier in enumerate(x):
        for later in x[i + 1:]:
            pairs.append((later, earlier))
    return pairs
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
    """
    Should behave like python2.3 :meth:`dict.pop` method; *d* is a
    :class:`dict`::

      # returns value for key and deletes item; raises a KeyError if key
      # is not in dict
      val = popd(d, key)

      # returns value for key if key exists, else default.  Delete key,
      # val item if it exists.  Will not raise a KeyError
      val = popd(d, key, default)
    """
    warnings.warn("Use native python dict.pop method", DeprecationWarning)
    # warning added 2008/07/22
    if len(args) == 1:
        (key,) = args
        val = d[key]
        del d[key]
    elif len(args) == 2:
        key, default = args
        val = d.get(key, default)
        try:
            del d[key]
        except KeyError:
            pass
    return val
class maxdict(dict):
    """
    A dictionary with a maximum size; this doesn't override all the
    relevant methods to contrain size, just setitem, so use with
    caution

    Eviction is FIFO on first insertion: when full, the key inserted
    earliest is dropped to make room for a new one.
    """
    def __init__(self, maxsize):
        dict.__init__(self)
        self.maxsize = maxsize      # maximum number of entries
        self._killkeys = []         # insertion order, oldest first
    def __setitem__(self, k, v):
        # Bug fix: only a brand-new key can push the dict over its
        # limit.  The original ran the eviction path even when
        # re-assigning an existing key, evicting an unrelated entry and
        # appending a duplicate to _killkeys -- a later eviction of that
        # stale duplicate then raised a spurious KeyError.
        if k not in self:
            if len(self) >= self.maxsize:
                del self[self._killkeys[0]]
                del self._killkeys[0]
            self._killkeys.append(k)
        dict.__setitem__(self, k, v)
class Stack:
    """
    Implement a stack where elements can be pushed on and you can move
    back and forth.  But no pop.  Should mimic home / back / forward
    in a browser
    """
    def __init__(self, default=None):
        self.clear()
        self._default = default
    def __call__(self):
        'return the current element, or None'
        if len(self._elements) == 0:
            return self._default
        return self._elements[self._pos]
    def forward(self):
        'move the position forward and return the current element'
        last = len(self._elements) - 1
        if self._pos < last:
            self._pos += 1
        return self()
    def back(self):
        'move the position back and return the current element'
        if self._pos > 0:
            self._pos -= 1
        return self()
    def push(self, o):
        """
        push object onto stack at current position - all elements
        occurring later than the current position are discarded
        """
        self._elements = self._elements[:self._pos + 1]
        self._elements.append(o)
        self._pos = len(self._elements) - 1
        return self()
    def home(self):
        'push the first element onto the top of the stack'
        if len(self._elements) == 0:
            return
        self.push(self._elements[0])
        return self()
    def empty(self):
        # true when the stack holds no elements at all
        return len(self._elements) == 0
    def clear(self):
        'empty the stack'
        self._pos = -1
        self._elements = []
    def bubble(self, o):
        """
        raise *o* to the top of the stack and return *o*.  *o* must be
        in the stack
        """
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        matches = []
        # re-push everything except *o*, collecting each occurrence of
        # *o* so the same number of copies ends up on top
        for item in previous:
            if item == o:
                matches.append(item)
            else:
                self.push(item)
        for _ in matches:
            self.push(o)
        return o
    def remove(self, o):
        'remove element *o* from the stack'
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        for item in previous:
            if item == o:
                continue
            self.push(item)
def popall(seq):
    'empty a list'
    # Fix: the original used xrange, which does not exist on Python 3;
    # popping until empty has the same observable effect.
    while len(seq):
        seq.pop()
def finddir(o, match, case=False):
    """
    return all attributes of *o* which match string in match.  if case
    is True require an exact case match.
    """
    if case:
        candidates = [(attr, attr) for attr in dir(o)
                      if is_string_like(attr)]
    else:
        # compare case-insensitively: lower both sides
        candidates = [(attr.lower(), attr) for attr in dir(o)
                      if is_string_like(attr)]
        match = match.lower()
    return [original for key, original in candidates
            if key.find(match) >= 0]
def reverse_dict(d):
    'reverse the dictionary -- may lose data if values are not unique!'
    return {v: k for k, v in d.items()}
def report_memory(i=0):  # argument may go away
    """Return the memory consumed by this process, as reported by ``ps``.

    Units are whatever the platform's ``ps`` column reports (kilobytes
    on the platforms handled below -- TODO confirm per platform).

    NOTE(review): on any other platform (e.g. win32) none of the
    branches runs and the final ``return mem`` raises
    UnboundLocalError; consider raising NotImplementedError explicitly.
    """
    pid = os.getpid()
    if sys.platform=='sunos5':
        # Solaris: total size of the process image
        a2 = os.popen('ps -p %d -o osz' % pid).readlines()
        mem = int(a2[-1].strip())
    elif sys.platform.startswith('linux'):
        a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
        mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
        mem = int(a2[1].split()[0])
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
    'make sure *args* are equal len before zipping'
    expected = len(args[0])
    # validate every later argument against the first one
    for pos, other in enumerate(args[1:], start=1):
        if len(other) != expected:
            raise ValueError(_safezip_msg % (expected, pos, len(other)))
    return zip(*args)
def issubclass_safe(x, klass):
    'return issubclass(x, klass) and return False on a TypeError'
    try:
        result = issubclass(x, klass)
    except TypeError:
        # x was not a class at all
        return False
    return result
class MemoryMonitor:
    """Record process memory usage over time.

    Each call to the instance samples memory via :func:`report_memory`
    into a fixed-size array of *nmax* slots; :meth:`report` prints a
    summary and :meth:`plot` graphs it.

    NOTE: report() uses Python 2 print statements and list-indexes the
    result of range(); Python 2 only as written.
    """
    def __init__(self, nmax=20000):
        self._nmax = nmax
        # pre-allocated sample buffer; int32 per sample
        self._mem = np.zeros((self._nmax,), np.int32)
        self.clear()
    def clear(self):
        # number of samples taken so far, and overflow flag
        self._n = 0
        self._overflow = False
    def __call__(self):
        # take one sample; drop it (and flag overflow) if the buffer is full
        mem = report_memory()
        if self._n < self._nmax:
            self._mem[self._n] = mem
            self._n += 1
        else:
            self._overflow = True
        return mem
    def report(self, segments=4):
        # print memory at *segments* evenly spaced sample indices, with deltas
        n = self._n
        segments = min(n, segments)
        dn = int(n/segments)
        ii = range(0, n, dn)
        ii[-1] = n-1
        print
        print 'memory report: i, mem, dmem, dmem/nloops'
        print 0, self._mem[0]
        for i in range(1, len(ii)):
            di = ii[i] - ii[i-1]
            if di == 0:
                continue
            dm = self._mem[ii[i]] - self._mem[ii[i-1]]
            print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
                                         dm, dm / float(di))
        if self._overflow:
            print "Warning: array size was too small for the number of calls."
    def xy(self, i0=0, isub=1):
        # return (sample indices, memory values), subsampled by *isub*
        x = np.arange(i0, self._n, isub)
        return x, self._mem[i0:self._n:isub]
    def plot(self, i0=0, isub=1, fig=None):
        # plot the recorded samples; creates a pylab figure if none given
        if fig is None:
            from pylab import figure, show
            fig = figure()
        ax = fig.add_subplot(111)
        ax.plot(*self.xy(i0, isub))
        fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
    """
    Find reference cycles reachable from *objects* and write each
    cycle's path to *outstream*.

    *objects*
        A list of objects to find cycles in.  It is often useful to
        pass in gc.garbage to find the cycles that are preventing some
        objects from being garbage collected.

    *outstream*
        The stream for output.

    *show_progress*
        If True, print the number of objects reached as they are found.
    """
    import gc
    from types import FrameType
    def print_path(path):
        # pretty-print one cycle: for each step, show how it refers to
        # the next object in the cycle
        for i, step in enumerate(path):
            # next "wraps around"
            next = path[(i + 1) % len(path)]
            outstream.write(" %s -- " % str(type(step)))
            if isinstance(step, dict):
                for key, val in step.items():
                    if val is next:
                        outstream.write("[%s]" % repr(key))
                        break
                    if key is next:
                        outstream.write("[key] = %s" % repr(val))
                        break
            elif isinstance(step, list):
                outstream.write("[%d]" % step.index(next))
            elif isinstance(step, tuple):
                outstream.write("( tuple )")
            else:
                outstream.write(repr(step))
            outstream.write(" ->\n")
        outstream.write("\n")
    def recurse(obj, start, all, current_path):
        # depth-first walk of gc referents; *all* maps id() -> seen marker
        if show_progress:
            outstream.write("%d\r" % len(all))
        all[id(obj)] = None
        referents = gc.get_referents(obj)
        for referent in referents:
            # If we've found our way back to the start, this is
            # a cycle, so print it out
            if referent is start:
                print_path(current_path)
            # Don't go back through the original list of objects, or
            # through temporary references to the object, since those
            # are just an artifact of the cycle detector itself.
            elif referent is objects or isinstance(referent, FrameType):
                continue
            # We haven't seen this object before, so recurse
            elif id(referent) not in all:
                recurse(referent, start, all, current_path + [obj])
    for obj in objects:
        outstream.write("Examining: %r\n" % (obj,))
        recurse(obj, obj, { }, [])
class Grouper(object):
    """
    This class provides a lightweight way to group arbitrary objects
    together into disjoint sets when a full-blown graph data structure
    would be overkill.

    Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retreived by
    using the object as an iterator.

    The objects being joined must be hashable.

    For example:

    >>> g = grouper.Grouper()
    >>> g.join('a', 'b')
    >>> g.join('b', 'c')
    >>> g.join('d', 'e')
    >>> list(g)
    [['a', 'b', 'c'], ['d', 'e']]
    >>> g.joined('a', 'b')
    True
    >>> g.joined('a', 'c')
    True
    >>> g.joined('a', 'd')
    False
    """
    # NOTE(review): `ref` is presumably weakref.ref imported at module
    # top -- members are held weakly so grouping does not keep them
    # alive.  itervalues() below makes this class Python 2 only.
    def __init__(self, init=[]):
        # _mapping: weakref -> shared list of weakrefs (the set it is in)
        mapping = self._mapping = {}
        for x in init:
            mapping[ref(x)] = [ref(x)]
    def __contains__(self, item):
        return ref(item) in self._mapping
    def clean(self):
        """
        Clean dead weak references from the dictionary
        """
        mapping = self._mapping
        for key, val in mapping.items():
            if key() is None:
                # target was garbage collected: drop both the mapping
                # entry and the weakref from its set
                del mapping[key]
                val.remove(key)
    def join(self, a, *args):
        """
        Join given arguments into the same set.  Accepts one or more
        arguments.
        """
        mapping = self._mapping
        set_a = mapping.setdefault(ref(a), [ref(a)])
        for arg in args:
            set_b = mapping.get(ref(arg))
            if set_b is None:
                set_a.append(ref(arg))
                mapping[ref(arg)] = set_a
            elif set_b is not set_a:
                # union by size: merge the smaller set into the larger
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a
        self.clean()
    def joined(self, a, b):
        """
        Returns True if *a* and *b* are members of the same set.
        """
        self.clean()
        mapping = self._mapping
        try:
            # same set <=> both keys map to the identical list object
            return mapping[ref(a)] is mapping[ref(b)]
        except KeyError:
            return False
    def __iter__(self):
        """
        Iterate over each of the disjoint sets as a list.

        The iterator is invalid if interleaved with calls to join().
        """
        self.clean()
        class Token: pass
        token = Token()
        # Mark each group as we come across if by appending a token,
        # and don't yield it twice
        for group in self._mapping.itervalues():
            if not group[-1] is token:
                yield [x() for x in group]
                group.append(token)
        # Cleanup the tokens
        for group in self._mapping.itervalues():
            if group[-1] is token:
                del group[-1]
    def get_siblings(self, a):
        """
        Returns all of the items joined with *a*, including itself.
        """
        self.clean()
        siblings = self._mapping.get(ref(a), [ref(a)])
        return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    """Resample array *a* along its first axis, inserting ``steps - 1``
    linearly interpolated points between each pair of adjacent values.

    Returns an array of length ``(len(a) - 1) * steps + 1`` with the
    same dtype and trailing shape as *a*.
    """
    # Fix: np.floor returns a float, and float slice steps / shape
    # entries are an error on Python 3 / modern NumPy -- cast to int.
    steps = int(np.floor(steps))
    new_length = (len(a) - 1) * steps + 1
    new_shape = list(a.shape)
    new_shape[0] = new_length
    result = np.zeros(new_shape, a.dtype)
    result[0] = a[0]
    a0 = a[0:-1]
    a1 = a[1:]
    delta = (a1 - a0) / steps
    # fill each interpolated offset in a strided pass
    for i in range(1, steps):
        result[i::steps] = delta * i + a0
    result[steps::steps] = a1
    return result
def recursive_remove(path):
    """Delete *path*; if it is a directory, delete its contents first.

    NOTE(review): uses os.removedirs, which also prunes empty *parent*
    directories -- os.rmdir may have been intended; confirm before
    relying on this for nested trees.
    """
    if os.path.isdir(path):
        # glob '*' misses dotfiles, hence the second pattern
        for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
            if os.path.isdir(fname):
                recursive_remove(fname)
                os.removedirs(fname)
            else:
                os.remove(fname)
        #os.removedirs(path)
    else:
        os.remove(path)
def delete_masked_points(*args):
    """
    Find all masked and/or non-finite points in a set of arguments,
    and return the arguments with only the unmasked points remaining.

    Arguments can be in any of 5 categories:

      1) 1-D masked arrays
      2) 1-D ndarrays
      3) ndarrays with more than one dimension
      4) other non-string iterables
      5) anything else

    The first argument must be in one of the first four categories;
    any argument with a length differing from that of the first
    argument (and hence anything in category 5) then will be
    passed through unchanged.

    Masks are obtained from all arguments of the correct length
    in categories 1, 2, and 4; a point is bad if masked in a masked
    array or if it is a nan or inf.  No attempt is made to
    extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
    does not yield a Boolean array.

    All input arguments that are not passed unchanged are returned
    as ndarrays after removing the points or rows corresponding to
    masks in any of the arguments.

    A vastly simpler version of this function was originally
    written as a helper for Axes.scatter().
    """
    # NOTE: relies on module-level helpers is_string_like/iterable and
    # on `ma` (numpy.ma); `reduce` below is the Python 2 builtin.
    if not len(args):
        return ()
    if (is_string_like(args[0]) or not iterable(args[0])):
        raise ValueError("First argument must be a sequence")
    nrecs = len(args[0])
    margs = []
    # seqlist[i] is True for args that participate in mask extraction
    seqlist = [False] * len(args)
    for i, x in enumerate(args):
        if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
            seqlist[i] = True
            if ma.isMA(x):
                if x.ndim > 1:
                    raise ValueError("Masked arrays must be 1-D")
            else:
                x = np.asarray(x)
        margs.append(x)
    masks = []    # list of masks that are True where good
    for i, x in enumerate(margs):
        if seqlist[i]:
            if x.ndim > 1:
                continue  # Don't try to get nan locations unless 1-D.
            if ma.isMA(x):
                masks.append(~ma.getmaskarray(x))  # invert the mask
                xd = x.data
            else:
                xd = x
            try:
                # non-finite (nan/inf) points are treated as bad
                mask = np.isfinite(xd)
                if isinstance(mask, np.ndarray):
                    masks.append(mask)
            except: #Fixme: put in tuple of possible exceptions?
                pass
    if len(masks):
        # a row is kept only if it is good in every participating arg
        mask = reduce(np.logical_and, masks)
        igood = mask.nonzero()[0]
        if len(igood) < nrecs:
            for i, x in enumerate(margs):
                if seqlist[i]:
                    margs[i] = x.take(igood, axis=0)
    for i, x in enumerate(margs):
        if seqlist[i] and ma.isMA(x):
            margs[i] = x.filled()
    return margs
def unmasked_index_ranges(mask, compressed = True):
    '''
    Find index ranges where *mask* is *False*.

    *mask* will be flattened if it is not already 1-D.

    Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
    indices for slices of the compressed :class:`numpy.ndarray`
    corresponding to each of *N* uninterrupted runs of unmasked
    values.  If optional argument *compressed* is *False*, it returns
    the start and stop indices into the original :class:`numpy.ndarray`,
    not the compressed :class:`numpy.ndarray`.  Returns *None* if there
    are no unmasked values.

    Example::

      y = ma.array(np.arange(5), mask = [0,0,1,0,0])
      ii = unmasked_index_ranges(ma.getmaskarray(y))
      # returns array [[0,2,] [2,4,]]

      y.compressed()[ii[1,0]:ii[1,1]]
      # returns array [3,4,]

      ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
      # returns array [[0, 2], [3, 5]]

      y.filled()[ii[1,0]:ii[1,1]]
      # returns array [3,4,]

    Prior to the transforms refactoring, this was used to support
    masked arrays in Line2D.
    '''
    mask = mask.reshape(mask.size)
    # pad with "masked" sentinels so every run has a defined start/stop edge
    padded = np.concatenate(((1,), mask, (1,)))
    edges = padded[1:] - padded[:-1]
    positions = np.arange(len(mask) + 1)
    starts = np.compress(edges == -1, positions)   # mask turns off here
    stops = np.compress(edges == 1, positions)     # mask turns back on here
    assert len(starts) == len(stops)
    if len(stops) == 0:
        return None  # Maybe this should be np.zeros((0,2), dtype=int)
    if not compressed:
        return np.concatenate((starts[:, np.newaxis],
                               stops[:, np.newaxis]), axis=1)
    # translate run lengths into slice bounds within the compressed array
    run_lengths = stops - starts
    ends = np.cumsum(run_lengths)
    begins = np.concatenate(((0,), ends[:-1]))
    return np.concatenate((begins[:, np.newaxis],
                           ends[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # thin deprecation shim: warn, then forward all arguments unchanged
    # deprecated from cbook in 0.98.4
    warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # Bug fix: the old code forwarded undefined names (x, y, xi, extrap)
    # copied from another wrapper, raising NameError on every call.
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # Bug fix: the old code hardcoded P=2. in the forwarded call,
    # silently ignoring the caller's *P* argument.
    return mlab.vector_lengths( X, P=P, axis=axis )
def distances_along_curve( X ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # thin deprecation shim: warn, then forward the argument unchanged
    # deprecated from cbook in 0.98.4
    warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.distances_along_curve( X )
def path_length(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # thin deprecation shim: warn, then forward the argument unchanged
    # deprecated from cbook in 0.98.4
    warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.path_length(X)
def is_closed_polygon(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # thin deprecation shim: warn, then forward the argument unchanged
    # deprecated from cbook in 0.98.4
    warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # thin deprecation shim: warn, then forward all arguments unchanged
    # deprecated from cbook in 0.98.4
    warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
    # smoke tests for allequal() when this module is run as a script
    assert( allequal([1,1,1]) )
    assert(not  allequal([1,1,0]) )
    assert( allequal([]) )
    assert( allequal(('a', 'a')))
    assert( not allequal(('a', 'b')))
| agpl-3.0 |
glemaitre/UnbalancedDataset | imblearn/tests/test_pipeline.py | 2 | 34905 | """
Test the pipeline module.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from pytest import raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_allclose
from sklearn.base import clone, BaseEstimator
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.datasets import load_iris, make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.externals.joblib import Memory
from imblearn.pipeline import Pipeline, make_pipeline
from imblearn.under_sampling import (RandomUnderSampler,
EditedNearestNeighbours as ENN)
# Toy text corpus used by vectorizer-related pipeline tests.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
# Relative tolerance for floating-point comparisons in these tests.
R_TOL = 1e-4
class NoFit(object):
    """Mock with two stored parameters but no fit method.

    Used to exercise Pipeline's parameter dispatching and its rejection
    of steps that do not implement fit.
    """
    def __init__(self, a=None, b=None):
        self.b = b
        self.a = a
class NoTrans(NoFit):
    """NoFit plus fit/get_params/set_params, but still no transform."""
    def fit(self, X, y):
        # fitting is a no-op for this mock
        return self
    def get_params(self, deep=False):
        return {'a': self.a, 'b': self.b}
    def set_params(self, **params):
        # deliberately honours only 'a', mirroring the original mock
        self.a = params['a']
        return self
class NoInvTransf(NoTrans):
    """NoTrans with an identity transform but no inverse_transform."""
    def transform(self, X, y=None):
        # identity transform
        return X
class Transf(NoInvTransf):
    """Identity transformer that also supports inverse_transform."""
    def transform(self, X, y=None):
        return X
    def inverse_transform(self, X):
        # inverse of identity is identity
        return X
class TransfFitParams(Transf):
    """Transf that records whatever fit params it is given."""
    def fit(self, X, y, **fit_params):
        # stash the params so tests can inspect what was routed here
        self.fit_params = fit_params
        return self
class Mult(BaseEstimator):
    """Estimator that multiplies its input by a constant factor."""
    def __init__(self, mult=1):
        self.mult = mult
    def fit(self, X, y):
        # stateless: nothing to learn
        return self
    def transform(self, X):
        return np.asarray(X) * self.mult
    def inverse_transform(self, X):
        return np.asarray(X) / self.mult
    def predict(self, X):
        # row-wise sum of the scaled input
        return (np.asarray(X) * self.mult).sum(axis=1)
    # every prediction-like method shares the same implementation
    predict_proba = predict_log_proba = decision_function = predict
    def score(self, X, y=None):
        return np.sum(X)
class FitParamT(BaseEstimator):
    """Mock classifier whose prediction echoes the fit-time flag."""
    def __init__(self):
        self.successful = False
    def fit(self, X, y, should_succeed=False):
        # record whether the caller asked for success
        self.successful = should_succeed
    def predict(self, X):
        return self.successful
    def fit_predict(self, X, y, should_succeed=False):
        self.fit(X, y, should_succeed=should_succeed)
        return self.predict(X)
    def score(self, X, y=None, sample_weight=None):
        # optionally weight the data before summing
        if sample_weight is not None:
            X = X * sample_weight
        return np.sum(X)
class DummyTransf(Transf):
    """Transformer which store the column means"""
    def fit(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # the timestamp lets tests tell a fresh fit from a cached one
        self.timestamp_ = time.time()
        return self
class DummySampler(NoTrans):
    """Sampler mock: fit records means/timestamp; sample is identity."""
    def fit(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # the timestamp lets tests tell a fresh fit from a cached one
        self.timestamp_ = time.time()
        return self
    def sample(self, X, y):
        return X, y
    def fit_sample(self, X, y):
        return self.fit(X, y).sample(X, y)
class FitTransformSample(NoTrans):
    """Estimator exposing both transform and sample (both identity)."""
    def fit(self, X, y, should_succeed=False):
        # nothing to learn
        pass
    def sample(self, X, y=None):
        return X, y
    def transform(self, X, y=None):
        return X
def test_pipeline_init():
    """Exercise Pipeline construction, parameter get/set, and cloning."""
    # Test the various init parameters of the pipeline.
    with raises(TypeError):
        Pipeline()
    # Check that we can't instantiate pipelines with objects without fit
    # method
    error_regex = 'Last step of Pipeline should implement fit. .*NoFit.*'
    with raises(TypeError, match=error_regex):
        Pipeline([('clf', NoFit())])
    # Smoke test with only an estimator
    clf = NoTrans()
    pipe = Pipeline([('svc', clf)])
    expected = dict(svc__a=None, svc__b=None, svc=clf,
                    **pipe.get_params(deep=False))
    assert pipe.get_params(deep=True) == expected
    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert clf.a == 0.1
    assert clf.b is None
    # Smoke test the repr:
    repr(pipe)
    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])
    # Check that we can't instantiate with non-transformers on the way
    # Note that NoTrans implements fit, but not transform
    error_regex = 'implement fit and transform or sample'
    with raises(TypeError, match=error_regex):
        Pipeline([('t', NoTrans()), ('svc', clf)])
    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert clf.C == 0.1
    # Smoke test the repr:
    repr(pipe)
    # Check that params are not set when naming them wrong
    with raises(ValueError):
        pipe.set_params(anova__C=0.1)
    # Test clone
    pipe2 = clone(pipe)
    assert not pipe.named_steps['svc'] is pipe2.named_steps['svc']
    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params(deep=True)
    params2 = pipe2.get_params(deep=True)
    for x in pipe.get_params(deep=False):
        params.pop(x)
    for x in pipe2.get_params(deep=False):
        params2.pop(x)
    # Remove estimators that where copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert params == params2
def test_pipeline_methods_anova():
    """Smoke-test fit/predict/score on an anova + logistic pipeline."""
    # Test the various methods of the pipeline (anova).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with Anova + LogisticRegression
    clf = LogisticRegression()
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([('anova', filter1), ('logistic', clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_fit_params():
    """Check step-prefixed fit params are routed and bad ones rejected."""
    # Test that the pipeline can take fit parameters
    pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # classifier should return True
    assert pipe.predict(None)
    # and transformer params should not be changed
    assert pipe.named_steps['transf'].a is None
    assert pipe.named_steps['transf'].b is None
    # invalid parameters should raise an error message
    with raises(TypeError, match="unexpected keyword argument"):
        pipe.fit(None, None, clf__bad=True)
def test_pipeline_sample_weight_supported():
    """sample_weight given to score() must reach the final estimator."""
    # Pipeline should pass sample_weight
    X = np.array([[1, 2]])
    pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipe.fit(X, y=None)
    assert pipe.score(X) == 3
    assert pipe.score(X, y=None) == 3
    assert pipe.score(X, y=None, sample_weight=None) == 3
    assert pipe.score(X, sample_weight=np.array([2, 3])) == 8
def test_pipeline_sample_weight_unsupported():
    """A final estimator without sample_weight support must raise."""
    # When sample_weight is None it shouldn't be passed
    X = np.array([[1, 2]])
    pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
    pipe.fit(X, y=None)
    assert pipe.score(X) == 3
    assert pipe.score(X, sample_weight=None) == 3
    with raises(TypeError, match="unexpected keyword argument"):
        pipe.score(X, sample_weight=np.array([2, 3]))
def test_pipeline_raise_set_params_error():
    """set_params with unknown (possibly nested) names must raise."""
    # Test pipeline raises set params error message for nested models.
    pipe = Pipeline([('cls', LinearRegression())])
    with raises(ValueError, match="Invalid parameter"):
        pipe.set_params(fake='nope')
    # nested model check
    with raises(ValueError, match="Invalid parameter"):
        pipe.set_params(fake__estimator='nope')
def test_pipeline_methods_pca_svm():
    """Smoke-test fit/predict/score on a PCA + SVC pipeline."""
    # Test the various methods of the pipeline (pca + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with PCA + SVC
    clf = SVC(probability=True, random_state=0)
    pca = PCA(svd_solver='full', n_components='mle', whiten=True)
    pipe = Pipeline([('pca', pca), ('svc', clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
    """Check prediction output shapes for scaler/PCA + SVC pipelines."""
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
    clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)
        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert predict.shape == (n_samples,)
        proba = pipe.predict_proba(X)
        assert proba.shape == (n_samples, n_classes)
        log_proba = pipe.predict_log_proba(X)
        assert log_proba.shape == (n_samples, n_classes)
        decision_function = pipe.decision_function(X)
        assert decision_function.shape == (n_samples, n_classes)
        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    """fit_predict on a pipeline equals running the steps separately."""
    iris = load_iris()
    # first compute the transform and clustering step separately
    scaled = StandardScaler().fit_transform(iris.data)
    separate_pred = KMeans(random_state=0).fit_predict(scaled)
    # As pipeline doesn't clone estimators on construction,
    # it must have its own estimators
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('Kmeans', KMeans(random_state=0)),
    ])
    # use a pipeline to do the transform and clustering in one step
    pipeline_pred = pipe.fit_predict(iris.data)
    assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
    """No fit_predict on the pipeline when the final step lacks one."""
    pipe = Pipeline([('scaler', StandardScaler()),
                     ('pca', PCA(svd_solver='full'))])
    # duck-typing: the attribute lookup itself must raise
    error_regex = "'PCA' object has no attribute 'fit_predict'"
    with raises(AttributeError, match=error_regex):
        getattr(pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
    """fit_predict routes step-prefixed fit_params to the right steps."""
    pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
    pipe.fit_predict(X=None,
                     y=None,
                     transf__should_get_this=True,
                     clf__should_succeed=True)
    transf_params = pipe.named_steps['transf'].fit_params
    # each step received exactly its own routed parameter, nothing more
    assert transf_params['should_get_this']
    assert pipe.named_steps['clf'].successful
    assert 'should_succeed' not in transf_params
def test_pipeline_transform():
    """A single-transformer pipeline transforms and inverse-transforms like PCA."""
    X = load_iris().data
    pca = PCA(n_components=2, svd_solver='full')
    pipeline = Pipeline([('pca', pca)])
    # test transform and fit_transform:
    X_trans = pipeline.fit(X).transform(X)
    assert_array_almost_equal(X_trans, pipeline.fit_transform(X))
    assert_array_almost_equal(X_trans, pca.fit_transform(X))
    # inverse_transform must delegate to the (sole) PCA step
    X_back = pipeline.inverse_transform(X_trans)
    assert_array_almost_equal(X_back, pca.inverse_transform(X_trans))
def test_pipeline_fit_transform():
    """fit_transform falls back to fit().transform() when absent on the step."""
    iris = load_iris()
    transf = Transf()
    pipeline = Pipeline([('mock', transf)])
    # test fit_transform:
    X_trans = pipeline.fit_transform(iris.data, iris.target)
    # must match an explicit fit followed by transform
    X_trans2 = transf.fit(iris.data, iris.target).transform(iris.data)
    assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
    """Pipeline steps can be replaced via the attribute or set_params."""
    transf1, transf2 = Transf(), Transf()
    pipeline = Pipeline([('mock', transf1)])
    assert pipeline.named_steps['mock'] is transf1
    # Directly setting attr
    pipeline.steps = [('mock2', transf2)]
    assert 'mock' not in pipeline.named_steps
    assert pipeline.named_steps['mock2'] is transf2
    assert pipeline.steps == [('mock2', transf2)]
    # Using set_params
    pipeline.set_params(steps=[('mock', transf1)])
    assert pipeline.steps == [('mock', transf1)]
    # Using set_params to replace single step
    pipeline.set_params(mock=transf2)
    assert pipeline.steps == [('mock', transf2)]
    # With invalid data: a tuple is not an estimator, fitting must fail
    pipeline.set_params(steps=[('junk', ())])
    with raises(TypeError):
        pipeline.fit([[1]], [1])
    with raises(TypeError):
        pipeline.fit_transform([[1]], [1])
def test_set_pipeline_step_none():
    # Test setting Pipeline steps to None
    # A None step must act as a passthrough for fit/transform/predict and
    # still show up in get_params.
    X = np.array([[1]])
    y = np.array([1])
    mult2 = Mult(mult=2)
    mult3 = Mult(mult=3)
    mult5 = Mult(mult=5)
    def make():
        # fresh pipeline over the three shared multiplier steps
        return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
    pipeline = make()
    # all three steps active: output is the product of the multipliers
    exp = 2 * 3 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # disable the middle step; only m2 and last contribute
    pipeline.set_params(m3=None)
    exp = 2 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # the None step remains visible in get_params (as 'm3': None) and its
    # sub-parameters ('m3__mult') disappear
    expected_params = {'steps': pipeline.steps,
                       'm2': mult2,
                       'm3': None,
                       'last': mult5,
                       'memory': None,
                       'm2__mult': 2,
                       'last__mult': 5}
    assert pipeline.get_params(deep=True) == expected_params
    # disable m2 as well; only the final step remains
    pipeline.set_params(m2=None)
    exp = 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # for other methods, ensure no AttributeErrors on None:
    other_methods = ['predict_proba', 'predict_log_proba',
                     'decision_function', 'transform', 'score']
    for method in other_methods:
        getattr(pipeline, method)(X)
    # re-enable m2 and confirm behavior is restored
    pipeline.set_params(m2=mult2)
    exp = 2 * 5
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    # a None FINAL step: transform works, predict must not exist
    pipeline = make()
    pipeline.set_params(last=None)
    # mult2 and mult3 are active
    exp = 6
    pipeline.fit(X, y)
    pipeline.transform(X)
    assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
    with raises(AttributeError, match="has no attribute 'predict'"):
        getattr(pipeline, 'predict')
    # Check None step at construction time
    exp = 2 * 5
    pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
    assert_array_equal([[exp]], pipeline.fit_transform(X, y))
    assert_array_equal([exp], pipeline.fit(X).predict(X))
    assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
    """A pipeline only exposes the methods its steps support."""
    # Mult supports predict, transform and inverse_transform.
    pipeline = make_pipeline(Mult(5))
    assert hasattr(pipeline, 'predict')
    assert hasattr(pipeline, 'transform')
    assert hasattr(pipeline, 'inverse_transform')
    # Transf is a pure transformer: no predict.
    pipeline = make_pipeline(Transf())
    assert not hasattr(pipeline, 'predict')
    assert hasattr(pipeline, 'transform')
    assert hasattr(pipeline, 'inverse_transform')
    # A lone None step behaves like a passthrough transformer.
    pipeline = make_pipeline(None)
    assert not hasattr(pipeline, 'predict')
    assert hasattr(pipeline, 'transform')
    assert hasattr(pipeline, 'inverse_transform')
    # Any step lacking inverse_transform disables it on the whole pipeline,
    # regardless of where that step sits.
    pipeline = make_pipeline(Transf(), NoInvTransf())
    assert not hasattr(pipeline, 'predict')
    assert hasattr(pipeline, 'transform')
    assert not hasattr(pipeline, 'inverse_transform')
    pipeline = make_pipeline(NoInvTransf(), Transf())
    assert not hasattr(pipeline, 'predict')
    assert hasattr(pipeline, 'transform')
    assert not hasattr(pipeline, 'inverse_transform')
def test_make_pipeline():
    """make_pipeline builds a Pipeline with auto-generated, numbered names."""
    t1, t2 = Transf(), Transf()
    pipe = make_pipeline(t1, t2)
    assert isinstance(pipe, Pipeline)
    # duplicate classes get a numeric suffix
    assert [name for name, _ in pipe.steps] == ["transf-1", "transf-2"]
    pipe = make_pipeline(t1, t2, FitParamT())
    assert isinstance(pipe, Pipeline)
    # a unique class keeps its plain lower-cased name
    assert [name for name, _ in pipe.steps] == ["transf-1", "transf-2",
                                                "fitparamt"]
def test_classes_property():
    """classes_ exists only on fitted classifier pipelines."""
    iris = load_iris()
    # A regressor pipeline never exposes classes_.
    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(iris.data, iris.target)
    with raises(AttributeError):
        getattr(reg, "classes_")
    # A classifier pipeline exposes classes_ only once it has been fitted.
    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    with raises(AttributeError):
        getattr(clf, "classes_")
    clf.fit(iris.data, iris.target)
    assert_array_equal(clf.classes_, np.unique(iris.target))
def test_pipeline_wrong_memory():
    """A memory argument that is neither a string nor a Memory raises at fit."""
    iris = load_iris()
    # Define memory as an integer, which is invalid
    cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
                           memory=1)
    error_regex = ("'memory' should either be a string or a joblib.Memory"
                   " instance, got 'memory=1' instead.")
    # the validation happens at fit time, not construction time
    with raises(ValueError, match=error_regex):
        cached_pipe.fit(iris.data, iris.target)
def test_pipeline_memory_transformer():
    # Fitted transformers are memoized through joblib.Memory: a second fit
    # (or a differently-named pipeline over the same data) must hit the cache.
    iris = load_iris()
    X = iris.data
    y = iris.target
    cachedir = mkdtemp()
    try:
        memory = Memory(cachedir=cachedir, verbose=10)
        # Test with Transformer + SVC
        clf = SVC(probability=True, random_state=0)
        transf = DummyTransf()
        # `pipe` gets a clone so the uncached reference run cannot share
        # fitted state with the cached pipeline
        pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
        cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
                               memory=memory)
        # Memoize the transformer at the first fit
        cached_pipe.fit(X, y)
        pipe.fit(X, y)
        # Get the time stamp of the tranformer in the cached pipeline
        expected_ts = cached_pipe.named_steps['transf'].timestamp_
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
        assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe.named_steps['transf'].means_)
        # NOTE(review): `transf` itself stays unfitted — presumably the cached
        # pipeline fits a clone inside the memoized call; confirm against the
        # Pipeline implementation
        assert not hasattr(transf, 'means_')
        # Check that we are reading the cache while fitting
        # a second time
        cached_pipe.fit(X, y)
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
        assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe.named_steps['transf'].means_)
        # an unchanged timestamp shows the cache was read instead of refitting
        assert cached_pipe.named_steps['transf'].timestamp_ == expected_ts
        # Create a new pipeline with cloned estimators
        # Check that even changing the name step does not affect the cache hit
        clf_2 = SVC(probability=True, random_state=0)
        transf_2 = DummyTransf()
        cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
                                 memory=memory)
        cached_pipe_2.fit(X, y)
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
        assert_array_equal(pipe.predict_proba(X),
                           cached_pipe_2.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe_2.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe_2.named_steps['transf_2'].means_)
        assert cached_pipe_2.named_steps['transf_2'].timestamp_ == expected_ts
    finally:
        # remove the on-disk cache even if an assertion failed
        shutil.rmtree(cachedir)
def test_pipeline_memory_sampler():
    # Same cache-hit protocol as test_pipeline_memory_transformer, but the
    # memoized step is a sampler instead of a transformer.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0)
    cachedir = mkdtemp()
    try:
        memory = Memory(cachedir=cachedir, verbose=10)
        # Test with Transformer + SVC
        clf = SVC(probability=True, random_state=0)
        transf = DummySampler()
        # `pipe` gets a clone so the uncached reference run cannot share
        # fitted state with the cached pipeline
        pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
        cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
                               memory=memory)
        # Memoize the transformer at the first fit
        cached_pipe.fit(X, y)
        pipe.fit(X, y)
        # Get the time stamp of the tranformer in the cached pipeline
        expected_ts = cached_pipe.named_steps['transf'].timestamp_
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
        assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe.named_steps['transf'].means_)
        # NOTE(review): `transf` itself stays unfitted — presumably the cached
        # pipeline fits a clone inside the memoized call
        assert not hasattr(transf, 'means_')
        # Check that we are reading the cache while fitting
        # a second time
        cached_pipe.fit(X, y)
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
        assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe.named_steps['transf'].means_)
        # an unchanged timestamp shows the cache was read instead of refitting
        assert cached_pipe.named_steps['transf'].timestamp_ == expected_ts
        # Create a new pipeline with cloned estimators
        # Check that even changing the name step does not affect the cache hit
        clf_2 = SVC(probability=True, random_state=0)
        transf_2 = DummySampler()
        cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
                                 memory=memory)
        cached_pipe_2.fit(X, y)
        # Check that cached_pipe and pipe yield identical results
        assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
        assert_array_equal(pipe.predict_proba(X),
                           cached_pipe_2.predict_proba(X))
        assert_array_equal(pipe.predict_log_proba(X),
                           cached_pipe_2.predict_log_proba(X))
        assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
        assert_array_equal(pipe.named_steps['transf'].means_,
                           cached_pipe_2.named_steps['transf_2'].means_)
        assert cached_pipe_2.named_steps['transf_2'].timestamp_ == expected_ts
    finally:
        # remove the on-disk cache even if an assertion failed
        shutil.rmtree(cachedir)
def test_pipeline_methods_pca_rus_svm():
    """Smoke-test prediction methods of a PCA -> RUS -> SVC pipeline."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    # Test with PCA + SVC, undersampling between the two
    pipe = Pipeline([('pca', PCA()),
                     ('rus', RandomUnderSampler(random_state=0)),
                     ('svc', SVC(probability=True, random_state=0))])
    pipe.fit(X, y)
    # every prediction-related entry point must run without error
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_methods_rus_pca_svm():
    """Smoke-test prediction methods of a RUS -> PCA -> SVC pipeline."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    # Same steps as the pca_rus_svm variant, but the sampler comes first
    pipe = Pipeline([('rus', RandomUnderSampler(random_state=0)),
                     ('pca', PCA()),
                     ('svc', SVC(probability=True, random_state=0))])
    pipe.fit(X, y)
    # every prediction-related entry point must run without error
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_sample():
    # Pipeline.sample / fit_sample must match using the sampler directly,
    # with and without a preceding PCA transformation.
    # Test whether pipeline works with a sampler at the end.
    # Also test pipeline.sampler
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0)
    rus = RandomUnderSampler(random_state=0)
    pipeline = Pipeline([('rus', rus)])
    # test transform and fit_transform:
    X_trans, y_trans = pipeline.fit(X, y).sample(X, y)
    X_trans2, y_trans2 = pipeline.fit_sample(X, y)
    X_trans3, y_trans3 = rus.fit_sample(X, y)
    assert_allclose(X_trans, X_trans2, rtol=R_TOL)
    assert_allclose(X_trans, X_trans3, rtol=R_TOL)
    assert_allclose(y_trans, y_trans2, rtol=R_TOL)
    assert_allclose(y_trans, y_trans3, rtol=R_TOL)
    # now sample after a PCA step; the standalone `pca` mirrors the one
    # inside the pipeline (both fitted on the same X)
    pca = PCA()
    pipeline = Pipeline([('pca', PCA()),
                         ('rus', rus)])
    X_trans, y_trans = pipeline.fit(X, y).sample(X, y)
    X_pca = pca.fit_transform(X)
    X_trans2, y_trans2 = rus.fit_sample(X_pca, y)
    # We round the value near to zero. It seems that PCA has some issue
    # with that
    X_trans[np.bitwise_and(X_trans < R_TOL, X_trans > -R_TOL)] = 0
    X_trans2[np.bitwise_and(X_trans2 < R_TOL, X_trans2 > -R_TOL)] = 0
    assert_allclose(X_trans, X_trans2, rtol=R_TOL)
    assert_allclose(y_trans, y_trans2, rtol=R_TOL)
def test_pipeline_sample_transform():
    """A transformer / sampler / transformer sandwich fits and transforms."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    # a sampler sitting between two transformers must not break either phase
    pipeline = Pipeline([('pca', PCA()),
                         ('rus', RandomUnderSampler(random_state=0)),
                         ('pca2', PCA())])
    pipeline.fit(X, y).transform(X)
def test_pipeline_none_classifier():
    """A None preprocessing step is transparent before a classifier."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    pipe = make_pipeline(None, LogisticRegression(random_state=0))
    pipe.fit(X, y)
    # every estimator method must work through the passthrough step
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.decision_function(X)
    pipe.score(X, y)
def test_pipeline_none_sampler_classifier():
    """None, then a sampler, then a classifier works end to end."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    pipe = make_pipeline(None,
                         RandomUnderSampler(random_state=0),
                         LogisticRegression(random_state=0))
    pipe.fit(X, y)
    # every estimator method must work through the passthrough step
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.decision_function(X)
    pipe.score(X, y)
def test_pipeline_sampler_none_classifier():
    """A sampler, then None, then a classifier works end to end."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    pipe = make_pipeline(RandomUnderSampler(random_state=0),
                         None,
                         LogisticRegression(random_state=0))
    pipe.fit(X, y)
    # every estimator method must work through the passthrough step
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.decision_function(X)
    pipe.score(X, y)
def test_pipeline_none_sampler_sample():
    """sample() still works when the first step is a None passthrough."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    pipe = make_pipeline(None, RandomUnderSampler(random_state=0))
    pipe.fit(X, y)
    pipe.sample(X, y)
def test_pipeline_none_transformer():
    """A None step followed by an invertible transformer round-trips X."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    # whitening PCA keeps all components, so transform is invertible
    pipe = make_pipeline(None, PCA(whiten=True))
    pipe.fit(X, y)
    X_trans = pipe.transform(X)
    X_inversed = pipe.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_inversed)
def test_pipeline_methods_anova_rus():
    """Smoke-test a RUS -> SelectKBest(anova) -> LogisticRegression pipeline."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=5000, random_state=0)
    # Test with RandomUnderSampling + Anova + LogisticRegression
    pipe = Pipeline([('rus', RandomUnderSampler(random_state=0)),
                     ('anova', SelectKBest(f_classif, k=2)),
                     ('logistic', LogisticRegression())])
    pipe.fit(X, y)
    # every prediction-related entry point must run without error
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_with_step_that_implements_both_sample_and_transform():
    """A step exposing both sample and transform is ambiguous and must be
    rejected at Pipeline construction time.

    The TypeError is raised by the constructor itself, so no training data
    is required; the make_classification call previously here generated a
    5000-sample dataset that was never used and has been removed.
    """
    clf = LogisticRegression()
    with raises(TypeError):
        Pipeline([('step', FitTransformSample()), ('logistic', clf)])
def test_pipeline_with_step_that_it_is_pipeline():
    """Using a Pipeline as an intermediate step of another Pipeline must
    raise TypeError at construction time.

    The error comes from the outer constructor, so no training data is
    required; the make_classification call previously here generated a
    5000-sample dataset that was never used and has been removed.
    """
    # Test with RandomUnderSampling + Anova + LogisticRegression
    clf = LogisticRegression()
    rus = RandomUnderSampler(random_state=0)
    filter1 = SelectKBest(f_classif, k=2)
    pipe1 = Pipeline([('rus', rus), ('anova', filter1)])
    with raises(TypeError):
        Pipeline([('pipe1', pipe1), ('logistic', clf)])
def test_pipeline_fit_then_sample_with_sampler_last_estimator():
    """fit_sample equals fit followed by sample when the last step samples."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=50000, random_state=0)
    rus = RandomUnderSampler(random_state=42)
    enn = ENN()
    # one-shot fit_sample ...
    X_fit_sample_resampled, y_fit_sample_resampled = \
        make_pipeline(rus, enn).fit_sample(X, y)
    # ... must match fit then sample on a fresh pipeline over the same steps
    pipeline = make_pipeline(rus, enn)
    pipeline.fit(X, y)
    X_fit_then_sample_res, y_fit_then_sample_res = pipeline.sample(X, y)
    assert_array_equal(X_fit_sample_resampled, X_fit_then_sample_res)
    assert_array_equal(y_fit_sample_resampled, y_fit_then_sample_res)
def test_pipeline_fit_then_sample_3_samplers_with_sampler_last_estimator():
    """Same fit_sample vs fit-then-sample equivalence with three samplers."""
    X, y = make_classification(
        n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3,
        n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1,
        n_samples=50000, random_state=0)
    rus = RandomUnderSampler(random_state=42)
    enn = ENN()
    # one-shot fit_sample through a three-sampler chain ...
    X_fit_sample_resampled, y_fit_sample_resampled = \
        make_pipeline(rus, enn, rus).fit_sample(X, y)
    # ... must match fit then sample on a fresh pipeline over the same steps
    pipeline = make_pipeline(rus, enn, rus)
    pipeline.fit(X, y)
    X_fit_then_sample_res, y_fit_then_sample_res = pipeline.sample(X, y)
    assert_array_equal(X_fit_sample_resampled, X_fit_then_sample_res)
    assert_array_equal(y_fit_sample_resampled, y_fit_then_sample_res)
| mit |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/io/tests/test_stata.py | 2 | 44490 | # pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
    def test_read_dta2(self):
        # Datetime columns (tc, big tc, td, tw, tm, tq, th, ty formats) parse
        # to the expected values across dta versions 114/115/117.
        if LooseVersion(sys.version) < '2.7':
            raise nose.SkipTest('datetime interp under 2.6 is faulty')
        expected = DataFrame.from_records(
            [
                (
                    datetime(2006, 11, 19, 23, 13, 20),
                    1479596223000,
                    datetime(2010, 1, 20),
                    datetime(2010, 1, 8),
                    datetime(2010, 1, 1),
                    datetime(1974, 7, 1),
                    datetime(2010, 1, 1),
                    datetime(2010, 1, 1)
                ),
                (
                    datetime(1959, 12, 31, 20, 3, 20),
                    -1479590,
                    datetime(1953, 10, 2),
                    datetime(1948, 6, 10),
                    datetime(1955, 1, 1),
                    datetime(1955, 7, 1),
                    datetime(1955, 1, 1),
                    # year-2 date: far outside the datetime64[ns] range
                    datetime(2, 1, 1)
                ),
                (
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                )
            ],
            columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
                     'monthly_date', 'quarterly_date', 'half_yearly_date',
                     'yearly_date']
        )
        # datetime(2, 1, 1) cannot be held in datetime64[ns], so the column
        # is compared as object dtype
        expected['yearly_date'] = expected['yearly_date'].astype('O')
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            parsed_114 = self.read_dta(self.dta2_114)
            parsed_115 = self.read_dta(self.dta2_115)
            parsed_117 = self.read_dta(self.dta2_117)
            # 113 is buggy due to limits of date format support in Stata
            # parsed_113 = self.read_dta(self.dta2_113)
            # Remove resource warnings
            w = [x for x in w if x.category is UserWarning]
            # should get warning for each call to read_dta
            tm.assert_equal(len(w), 3)
        # buggy test because of the NaT comparison on certain platforms
        # Format 113 test fails since it does not support tc and tC formats
        # tm.assert_frame_equal(parsed_113, expected)
        tm.assert_frame_equal(parsed_114, expected)
        tm.assert_frame_equal(parsed_115, expected)
        tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
    def test_encoding(self):
        # GH 4626, proper encoding handling
        # Reading with encoding="latin-1" must decode strings; the decoded
        # frame must also round trip through to_stata with the same encoding.
        raw = read_stata(self.dta_encoding)
        encoded = read_stata(self.dta_encoding, encoding="latin-1")
        result = encoded.kreis1849[0]
        if compat.PY3:
            # on py3 the raw read already yields str, so values match directly
            expected = raw.kreis1849[0]
            self.assertEqual(result, expected)
            self.assertIsInstance(result, compat.string_types)
        else:
            # on py2 the raw read yields bytes; decode to compare, and the
            # encoded result must be the py2-only `unicode` type
            expected = raw.kreis1849.str.decode("latin-1")[0]
            self.assertEqual(result, expected)
            self.assertIsInstance(result, unicode)
        with tm.ensure_clean() as path:
            encoded.to_stata(path,encoding='latin-1', write_index=False)
            reread_encoded = read_stata(path, encoding='latin-1')
            tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
    """Column names that collide after truncation, or consist of
    symbols/keywords, are made unique and legal on write."""
    original = DataFrame([(1, 2, 3, 4, 5, 6)],
                         columns=['astringwithmorethan32characters_1',
                                  'astringwithmorethan32characters_2',
                                  '+',
                                  '-',
                                  'short',
                                  'delete'])
    # Sanitized names the writer is expected to produce.
    formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
                          columns=['astringwithmorethan32characters_',
                                   '_0astringwithmorethan32character',
                                   '_',
                                   '_1_',
                                   '_short',
                                   '_delete'])
    formatted.index.name = 'index'
    formatted = formatted.astype(np.int32)
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            original.to_stata(path, None)
            tm.assert_equal(len(w), 1)  # should get a warning for that format.
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              formatted)
def test_read_write_dta13(self):
    """int16/int32 round-trip unchanged; int64 comes back as float64.

    The original test aliased ``formatted = original`` and then cast the
    int64 column in place, which converted ``original`` *before* the
    write — so the int64 -> float64 conversion inside to_stata was never
    actually exercised.  Using a copy keeps the written frame int64.
    """
    s1 = Series(2**9, dtype=np.int16)
    s2 = Series(2**17, dtype=np.int32)
    s3 = Series(2**33, dtype=np.int64)
    original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
    original.index.name = 'index'
    # Expected result after the round trip: int64 widened to float64.
    formatted = original.copy()
    formatted['int64'] = formatted['int64'].astype(np.float64)
    with tm.ensure_clean() as path:
        original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              formatted)
def test_read_write_reread_dta14(self):
    """dta14 parses identically across formats 113/114/115/117 and
    round-trips through to_stata with a 'td' date column."""
    # NOTE(review): `expected` is built from the CSV but never compared
    # against any parsed frame — possibly a missing assertion; confirm.
    expected = self.read_csv(self.csv14)
    cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
    for col in cols:
        expected[col] = expected[col].convert_objects(convert_numeric=True)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
    parsed_113 = self.read_dta(self.dta14_113)
    parsed_113.index.name = 'index'
    parsed_114 = self.read_dta(self.dta14_114)
    parsed_114.index.name = 'index'
    parsed_115 = self.read_dta(self.dta14_115)
    parsed_115.index.name = 'index'
    parsed_117 = self.read_dta(self.dta14_117)
    parsed_117.index.name = 'index'
    # All Stata format versions must agree with each other.
    tm.assert_frame_equal(parsed_114, parsed_113)
    tm.assert_frame_equal(parsed_114, parsed_115)
    tm.assert_frame_equal(parsed_114, parsed_117)
    with tm.ensure_clean() as path:
        parsed_114.to_stata(path, {'date_td': 'td'})
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              parsed_114)
def test_read_write_reread_dta15(self):
    """dta15 parses to the CSV-derived expectation and agrees across
    Stata format versions 113/114/115/117."""
    expected = self.read_csv(self.csv15)
    # Narrow each column to the dtype the Stata reader produces.
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    parsed_113 = self.read_dta(self.dta15_113)
    parsed_114 = self.read_dta(self.dta15_114)
    parsed_115 = self.read_dta(self.dta15_115)
    parsed_117 = self.read_dta(self.dta15_117)
    tm.assert_frame_equal(expected, parsed_114)
    tm.assert_frame_equal(parsed_113, parsed_114)
    tm.assert_frame_equal(parsed_114, parsed_115)
    tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
    """time_stamp and data_label given to to_stata end up in the file
    header and are exposed by StataReader."""
    original = DataFrame([(1,)], columns=['var'])
    time_stamp = datetime(2000, 2, 29, 14, 21)
    data_label = 'This is a data file.'
    with tm.ensure_clean() as path:
        original.to_stata(path, time_stamp=time_stamp,
                          data_label=data_label)
        reader = StataReader(path)
        # The header stores the timestamp as text; parse it back
        # (minute resolution) before comparing.
        parsed_time_stamp = dt.datetime.strptime(reader.time_stamp,
                                                 ('%d %b %Y %H:%M'))
        assert parsed_time_stamp == time_stamp
        assert reader.data_label == data_label
def test_numeric_column_names(self):
    """Numeric column names are renamed on write with a single
    InvalidColumnName warning, and can be recovered after reading.

    The original test called ``tm.assert_produces_warning(
    original.to_stata(path), InvalidColumnName)`` — that executes
    to_stata *first* and passes its None result to the context-manager
    factory without entering it, so the category was never checked.
    """
    original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            original.to_stata(path)
            # should produce a single InvalidColumnName warning
            tm.assert_equal(len(w), 1)
            tm.assert_equal(w[0].category, InvalidColumnName)
        written_and_read_again = self.read_dta(path)
        written_and_read_again = written_and_read_again.set_index('index')
        # Stata-legal names come back as _0, _1, ...; strip the leading
        # underscore to recover the original integer labels.  A list
        # comprehension (not map) keeps this correct on Python 3.
        written_and_read_again.columns = [
            int(col[1]) for col in written_and_read_again.columns]
        tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
    """NaN values map to Stata missing values and back unchanged."""
    float32_col = Series(np.arange(4.0), dtype=np.float32)
    float64_col = Series(np.arange(4.0), dtype=np.float64)
    # Punch NaN holes at alternating positions in each column.
    float32_col[::2] = np.nan
    float64_col[1::2] = np.nan
    original = DataFrame({'s1': float32_col, 's2': float64_col})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path).set_index('index')
        tm.assert_frame_equal(reread, original)
def test_no_index(self):
    """write_index=False omits the index column from the written file."""
    columns = ['x', 'y']
    original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
                         columns=columns)
    original.index.name = 'index_not_written'
    with tm.ensure_clean() as path:
        original.to_stata(path, write_index=False)
        written_and_read_again = self.read_dta(path)
        # The index column must not exist in the re-read frame.
        tm.assertRaises(KeyError,
                        lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
    """String and float columns round-trip with no date conversion."""
    original = DataFrame({'s1': Series(['a', 'A longer string']),
                          's2': Series([1.0, 2.0], dtype=np.float64)})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path)
        tm.assert_frame_equal(reread.set_index('index'), original)
def test_large_value_conversion(self):
    """Values at the top of an integer dtype's range are upcast on
    write with a single PossiblePrecisionLoss warning.

    The original test misused ``tm.assert_produces_warning`` by calling
    it with the already-executed ``to_stata`` result, so the warning
    category was never actually verified; check it explicitly instead.
    """
    s0 = Series([1, 99], dtype=np.int8)
    s1 = Series([1, 127], dtype=np.int8)
    s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
    s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
    original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            original.to_stata(path)
            # should produce a single PossiblePrecisionLoss warning
            tm.assert_equal(len(w), 1)
            tm.assert_equal(w[0].category, PossiblePrecisionLoss)
        written_and_read_again = self.read_dta(path)
        # Expected upcasts: int8@127 -> int16, int16@max -> int32,
        # int64@max -> float64; s0 (99) fits and stays int8.
        modified = original.copy()
        modified['s1'] = Series(modified['s1'], dtype=np.int16)
        modified['s2'] = Series(modified['s2'], dtype=np.int32)
        modified['s3'] = Series(modified['s3'], dtype=np.float64)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              modified)
def test_dates_invalid_column(self):
    """A numeric column name used as a date column is renamed to '_0'
    with a single InvalidColumnName warning.

    Replaces the original's broken
    ``tm.assert_produces_warning(original.to_stata(...), ...)`` call,
    which executed to_stata first and never entered the checker.
    """
    original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            original.to_stata(path, {0: 'tc'})
            tm.assert_equal(len(w), 1)
            tm.assert_equal(w[0].category, InvalidColumnName)
        written_and_read_again = self.read_dta(path)
        modified = original.copy()
        modified.columns = ['_0']
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              modified)
def test_date_export_formats(self):
    """Each Stata date format truncates a datetime to the start of its
    period (day/week/month/quarter/half/year) on the round trip."""
    columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
    # Convert each column using the format matching its name.
    conversions = dict(((c, c) for c in columns))
    data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
    original = DataFrame([data], columns=columns)
    original.index.name = 'index'
    expected_values = [datetime(2006, 11, 20, 23, 13, 20),  # Time
                       datetime(2006, 11, 20),  # Day
                       datetime(2006, 11, 19),  # Week
                       datetime(2006, 11, 1),  # Month
                       datetime(2006, 10, 1),  # Quarter year
                       datetime(2006, 7, 1),  # Half year
                       datetime(2006, 1, 1)]  # Year
    expected = DataFrame([expected_values], columns=columns)
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path, conversions)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              expected)
def test_write_missing_strings(self):
    """None in a string column is written out as the empty string."""
    original = DataFrame([["1"], [None]], columns=["foo"])
    expected = DataFrame([["1"], [""]], columns=["foo"])
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        round_tripped = self.read_dta(path).set_index('index')
        tm.assert_frame_equal(round_tripped, expected)
def test_bool_uint(self):
    """bool and unsigned integer columns are written as the smallest
    signed type (or float64) whose range covers their values."""
    s0 = Series([0, 1, True], dtype=np.bool)
    s1 = Series([0, 1, 100], dtype=np.uint8)
    s2 = Series([0, 1, 255], dtype=np.uint8)
    s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
    s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
    s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
    s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
    original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
                          's4': s4, 's5': s5, 's6': s6})
    original.index.name = 'index'
    expected = original.copy()
    # Expected target dtype for each column, in column order; columns
    # whose max exceeds the signed range step up one width (uint32 max
    # needs float64).
    expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
                      np.int32, np.float64)
    for c, t in zip(expected.columns, expected_types):
        expected[c] = expected[c].astype(t)
    with tm.ensure_clean() as path:
        original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        written_and_read_again = written_and_read_again.set_index('index')
        tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
    """variable_labels() agrees between the 115 and 117 readers and
    matches the known var/label names of dta16."""
    sr_115 = StataReader(self.dta16_115).variable_labels()
    sr_117 = StataReader(self.dta16_117).variable_labels()
    keys = ('var1', 'var2', 'var3')
    labels = ('label1', 'label2', 'label3')
    for k, v in compat.iteritems(sr_115):
        self.assertTrue(k in sr_117)
        self.assertTrue(v == sr_117[k])
        self.assertTrue(k in keys)
        self.assertTrue(v in labels)
def test_minimal_size_col(self):
    """String columns are stored with the minimal width that fits."""
    str_lens = (1, 100, 244)
    s = {}
    for str_len in str_lens:
        # Column 's<n>' holds strings of exactly length n.
        s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len,
                                        'c' * str_len])
    original = DataFrame(s)
    with tm.ensure_clean() as path:
        original.to_stata(path, write_index=False)
        sr = StataReader(path)
        typlist = sr.typlist
        variables = sr.varlist
        formats = sr.fmtlist
        # For column 's<n>': the format is '%<n>s' and the stored type
        # width equals n.
        for variable, fmt, typ in zip(variables, formats, typlist):
            self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
            self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
    """Strings beyond Stata's 244-character limit raise ValueError."""
    data = {}
    for length in (1, 244, 500):
        data['s' + str(length)] = Series(['a' * length, 'b' * length,
                                          'c' * length])
    original = DataFrame(data)
    with tm.assertRaises(ValueError):
        with tm.ensure_clean() as path:
            original.to_stata(path)
def test_missing_value_generator(self):
    """StataMissingValue maps codes past each type's valid range to the
    '.', '.a' ... '.z' missing-value strings."""
    types = ('b', 'h', 'l')
    df = DataFrame([[0.0]], columns=['float_'])
    with tm.ensure_clean() as path:
        df.to_stata(path)
        # VALID_RANGE gives (min, max) of representable values per type.
        valid_range = StataReader(path).VALID_RANGE
    expected_values = ['.' + chr(97 + i) for i in range(26)]
    expected_values.insert(0, '.')
    for t in types:
        # The first missing code sits just above the valid maximum;
        # the following 26 map to '.a' .. '.z'.
        offset = valid_range[t][1]
        for i in range(0, 27):
            val = StataMissingValue(offset + 1 + i)
            self.assertTrue(val.string == expected_values[i])
    # Test extremes for floats
    val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
    self.assertTrue(val.string == '.')
    val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
    self.assertTrue(val.string == '.z')
    # Test extremes for doubles
    val = StataMissingValue(
        struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
    self.assertTrue(val.string == '.')
    val = StataMissingValue(
        struct.unpack('<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
    self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
    """convert_missing=True yields StataMissingValue objects for every
    missing code of every numeric type, across format versions."""
    columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
    smv = StataMissingValue(101)
    keys = [key for key in iterkeys(smv.MISSING_VALUES)]
    keys.sort()
    data = []
    # 27 missing codes ('.', '.a'..'.z') for each of the 5 types; the
    # sorted keys are grouped 27-per-type.
    for i in range(27):
        row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
        data.append(row)
    expected = DataFrame(data, columns=columns)
    parsed_113 = read_stata(self.dta17_113, convert_missing=True)
    parsed_115 = read_stata(self.dta17_115, convert_missing=True)
    parsed_117 = read_stata(self.dta17_117, convert_missing=True)
    tm.assert_frame_equal(expected, parsed_113)
    tm.assert_frame_equal(expected, parsed_115)
    tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
    """Dates at the extremes of each Stata date format parse correctly
    and survive a write round trip (including NaT)."""
    yr = [1960, 2000, 9999, 100, 2262, 1677]
    mo = [1, 1, 12, 1, 4, 9]
    dd = [1, 1, 31, 1, 22, 23]
    hr = [0, 0, 23, 0, 0, 0]
    mm = [0, 0, 59, 0, 0, 0]
    ss = [0, 0, 59, 0, 0, 0]
    expected = []
    # Column j=0 keeps full time resolution, j=6 truncates to the year,
    # the rest start from the calendar day.
    for i in range(len(yr)):
        row = []
        for j in range(7):
            if j == 0:
                row.append(
                    datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
            elif j == 6:
                row.append(datetime(yr[i], 1, 1))
            else:
                row.append(datetime(yr[i], mo[i], dd[i]))
        expected.append(row)
    expected.append([NaT] * 7)
    columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
               'date_th', 'date_ty']
    # Fixes for weekly, quarterly, half, year truncation at the extremes
    expected[2][2] = datetime(9999, 12, 24)
    expected[2][3] = datetime(9999, 12, 1)
    expected[2][4] = datetime(9999, 10, 1)
    expected[2][5] = datetime(9999, 7, 1)
    expected[4][2] = datetime(2262, 4, 16)
    expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
    expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
    expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
    expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
    expected = DataFrame(expected, columns=columns, dtype=np.object)
    parsed_115 = read_stata(self.dta18_115)
    parsed_117 = read_stata(self.dta18_117)
    tm.assert_frame_equal(expected, parsed_115)
    tm.assert_frame_equal(expected, parsed_117)
    # Map each column to its Stata format code (last two name chars).
    date_conversion = dict((c, c[-2:]) for c in columns)
    with tm.ensure_clean() as path:
        expected.index.name = 'index'
        expected.to_stata(path, date_conversion)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              expected)
def test_dtype_conversion(self):
    """preserve_dtypes=False returns pandas default dtypes instead of
    the narrow Stata storage dtypes."""
    expected = self.read_csv(self.csv15)
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    # Default behaviour keeps the narrow Stata dtypes.
    no_conversion = read_stata(self.dta15_117,
                               convert_dates=True)
    tm.assert_frame_equal(expected, no_conversion)
    conversion = read_stata(self.dta15_117,
                            convert_dates=True,
                            preserve_dtypes=False)
    # read_csv types are the same
    expected = self.read_csv(self.csv15)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
    """columns= restricts the read to a subset; duplicate or unknown
    column names raise ValueError."""
    expected = self.read_csv(self.csv15)
    expected['byte_'] = expected['byte_'].astype(np.int8)
    expected['int_'] = expected['int_'].astype(np.int16)
    expected['long_'] = expected['long_'].astype(np.int32)
    expected['float_'] = expected['float_'].astype(np.float32)
    expected['double_'] = expected['double_'].astype(np.float64)
    expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                    args=('%Y-%m-%d',))
    columns = ['byte_', 'int_', 'long_']
    expected = expected[columns]
    dropped = read_stata(self.dta15_117, convert_dates=True,
                         columns=columns)
    tm.assert_frame_equal(expected, dropped)
    # Duplicated column names are rejected.
    with tm.assertRaises(ValueError):
        columns = ['byte_', 'byte_']
        read_stata(self.dta15_117, convert_dates=True, columns=columns)
    # Unknown column names are rejected.
    with tm.assertRaises(ValueError):
        columns = ['byte_', 'int_', 'long_', 'not_found']
        read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
    """Categorical columns round-trip through Stata value labels,
    with non-string categories coming back as their str() form."""
    original = DataFrame.from_records(
        [
            ["one", "ten", "one", "one", "one", 1],
            ["two", "nine", "two", "two", "two", 2],
            ["three", "eight", "three", "three", "three", 3],
            ["four", "seven", 4, "four", "four", 4],
            ["five", "six", 5, np.nan, "five", 5],
            ["six", "five", 6, np.nan, "six", 6],
            ["seven", "four", 7, np.nan, "seven", 7],
            ["eight", "three", 8, np.nan, "eight", 8],
            ["nine", "two", 9, np.nan, "nine", 9],
            ["ten", "one", "ten", np.nan, "ten", 10]
        ],
        columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
                 'labeled_with_missings', 'float_labelled', 'unlabeled'])
    expected = original.copy()
    # these are all categoricals
    original = pd.concat([original[col].astype('category')
                          for col in original], axis=1)
    # Mixed/numeric categories come back stringified after the trip.
    expected['incompletely_labeled'] = (
        expected['incompletely_labeled'].apply(str))
    expected['unlabeled'] = expected['unlabeled'].apply(str)
    expected = pd.concat([expected[col].astype('category')
                          for col in expected], axis=1)
    expected.index.name = 'index'
    with tm.ensure_clean() as path:
        with warnings.catch_warnings(record=True) as w:
            # Silence warnings
            original.to_stata(path)
        written_and_read_again = self.read_dta(path)
        tm.assert_frame_equal(written_and_read_again.set_index('index'),
                              expected)
def test_categorical_warnings_and_errors(self):
    """Overlong category labels raise ValueError; mixed-type category
    labels only warn."""
    # Error for labels too long
    original = pd.DataFrame.from_records(
        [['a' * 10000],
         ['b' * 10000],
         ['c' * 10000],
         ['d' * 10000]],
        columns=['Too_long'])
    original = pd.concat([original[col].astype('category')
                          for col in original], axis=1)
    with tm.ensure_clean() as path:
        tm.assertRaises(ValueError, original.to_stata, path)
        # Warning for non-string labels
        original = pd.DataFrame.from_records(
            [['a'],
             ['b'],
             ['c'],
             ['d'],
             [1]],
            columns=['Too_long'])
        original = pd.concat([original[col].astype('category')
                              for col in original], axis=1)
        with warnings.catch_warnings(record=True) as w:
            original.to_stata(path)
            tm.assert_equal(len(w), 1)  # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
    """A categorical with many labels plus NaN survives a round trip."""
    records = [['a' + str(i)] for i in range(120)]
    records.append([np.nan])
    original = pd.DataFrame.from_records(records, columns=['many_labels'])
    original = pd.concat([original[name].astype('category')
                          for name in original], axis=1)
    original.index.name = 'index'
    with tm.ensure_clean() as path:
        original.to_stata(path)
        reread = self.read_dta(path).set_index('index')
        tm.assert_frame_equal(reread, original)
def test_categorical_order(self):
    """Category order and underlying codes from read files match a
    directly constructed expectation."""
    # Directly construct using expected codes
    # Format is is_cat, col_name, labels (in order), underlying data
    expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
                (True, 'reverse', ['a', 'b', 'c', 'd', 'e'],
                 np.arange(5)[::-1]),
                (True, 'noorder', ['a', 'b', 'c', 'd', 'e'],
                 np.array([2, 1, 4, 0, 3])),
                (True, 'floating', ['a', 'b', 'c', 'd', 'e'],
                 np.arange(0, 5)),
                (True, 'float_missing', ['a', 'd', 'e'],
                 np.array([0, 1, 2, -1, -1])),
                (False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0],
                 np.arange(5)),
                (True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
                 np.arange(5))]
    cols = []
    for is_cat, col, labels, codes in expected:
        if is_cat:
            cols.append((col, pd.Categorical.from_codes(codes, labels)))
        else:
            cols.append((col, pd.Series(labels, dtype=np.float32)))
    expected = DataFrame.from_items(cols)
    # Read with and with out categoricals, ensure order is identical
    parsed_115 = read_stata(self.dta19_115)
    parsed_117 = read_stata(self.dta19_117)
    tm.assert_frame_equal(expected, parsed_115)
    tm.assert_frame_equal(expected, parsed_117)
    # Check identity of codes
    for col in expected:
        if is_categorical_dtype(expected[col]):
            tm.assert_series_equal(expected[col].cat.codes,
                                   parsed_115[col].cat.codes)
            tm.assert_index_equal(expected[col].cat.categories,
                                  parsed_115[col].cat.categories)
def test_categorical_sorting(self):
    """Sorting a categorical column orders by codes, not label strings."""
    parsed_115 = read_stata(self.dta20_115)
    parsed_117 = read_stata(self.dta20_117)
    # Sort based on codes, not strings
    parsed_115 = parsed_115.sort("srh")
    parsed_117 = parsed_117.sort("srh")
    # Don't sort index
    parsed_115.index = np.arange(parsed_115.shape[0])
    parsed_117.index = np.arange(parsed_117.shape[0])
    # -1 codes are missing values and sort first.
    codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
    categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
    expected = pd.Series(pd.Categorical.from_codes(codes=codes,
                                                   categories=categories))
    tm.assert_series_equal(expected, parsed_115["srh"])
    tm.assert_series_equal(expected, parsed_117["srh"])
def test_categorical_ordering(self):
    """order_categoricals controls the .ordered flag of read columns."""
    parsed_115 = read_stata(self.dta19_115)
    parsed_117 = read_stata(self.dta19_117)
    parsed_115_unordered = read_stata(self.dta19_115,
                                      order_categoricals=False)
    parsed_117_unordered = read_stata(self.dta19_117,
                                      order_categoricals=False)
    for col in parsed_115:
        # Only categorical columns carry the ordered flag.
        if not is_categorical_dtype(parsed_115[col]):
            continue
        tm.assert_equal(True, parsed_115[col].cat.ordered)
        tm.assert_equal(True, parsed_117[col].cat.ordered)
        tm.assert_equal(False, parsed_115_unordered[col].cat.ordered)
        tm.assert_equal(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
    """Chunked iterator reads match a full read for 117-format files,
    across chunk sizes and conversion options."""
    files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
                 self.dta4_117, self.dta14_117, self.dta15_117,
                 self.dta16_117, self.dta17_117, self.dta18_117,
                 self.dta19_117, self.dta20_117]
    for fname in files_117:
        for chunksize in 1, 2:
            for convert_categoricals in False, True:
                for convert_dates in False, True:
                    # Reference: parse the whole file in one go.
                    with warnings.catch_warnings(record=True) as w:
                        warnings.simplefilter("always")
                        parsed = read_stata(
                            fname,
                            convert_categoricals=convert_categoricals,
                            convert_dates=convert_dates)
                    itr = read_stata(fname, iterator=True)
                    pos = 0
                    for j in range(5):
                        with warnings.catch_warnings(record=True) as w:
                            warnings.simplefilter("always")
                            try:
                                chunk = itr.read(chunksize)
                            except StopIteration:
                                break
                        from_frame = parsed.iloc[pos:pos + chunksize, :]
                        try:
                            tm.assert_frame_equal(from_frame, chunk,
                                                  check_dtype=False)
                        except AssertionError:
                            # datetime.datetime and pandas.tslib.Timestamp may hold
                            # equivalent values but fail assert_frame_equal
                            assert(all([x == y for x, y
                                        in zip(from_frame, chunk)]))
                        pos += chunksize
def test_iterator(self):
    """iterator=True and chunksize= both expose read()/get_chunk()
    interfaces yielding the same rows as a full parse."""
    fname = self.dta3_117
    parsed = read_stata(fname)
    itr = read_stata(fname, iterator=True)
    chunk = itr.read(5)
    tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
    itr = read_stata(fname, chunksize=5)
    chunk = list(itr)
    tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
    itr = read_stata(fname, iterator=True)
    chunk = itr.get_chunk(5)
    tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
    itr = read_stata(fname, chunksize=5)
    # get_chunk() without a size falls back to the reader's chunksize.
    chunk = itr.get_chunk()
    tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
def test_read_chunks_115(self):
    """Chunked iterator reads match a full read for 115-format files,
    across chunk sizes and conversion options."""
    files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
                 self.dta14_115, self.dta15_115, self.dta16_115,
                 self.dta17_115, self.dta18_115, self.dta19_115,
                 self.dta20_115]
    for fname in files_115:
        for chunksize in 1, 2:
            for convert_categoricals in False, True:
                for convert_dates in False, True:
                    # Reference: parse the whole file in one go.
                    with warnings.catch_warnings(record=True) as w:
                        warnings.simplefilter("always")
                        parsed = read_stata(
                            fname,
                            convert_categoricals=convert_categoricals,
                            convert_dates=convert_dates)
                    itr = read_stata(
                        fname, iterator=True,
                        convert_categoricals=convert_categoricals)
                    pos = 0
                    for j in range(5):
                        with warnings.catch_warnings(record=True) as w:
                            warnings.simplefilter("always")
                            try:
                                chunk = itr.read(chunksize)
                            except StopIteration:
                                break
                        from_frame = parsed.iloc[pos:pos + chunksize, :]
                        try:
                            tm.assert_frame_equal(from_frame, chunk,
                                                  check_dtype=False)
                        except AssertionError:
                            # datetime.datetime and pandas.tslib.Timestamp may hold
                            # equivalent values but fail assert_frame_equal
                            assert(all([x == y for x, y
                                        in zip(from_frame, chunk)]))
                        pos += chunksize
def test_read_chunks_columns(self):
    """read(chunksize, columns=...) restricts each chunk's columns."""
    fname = self.dta3_117
    columns = ['quarter', 'cpi', 'm1']
    chunksize = 2
    parsed = read_stata(fname, columns=columns)
    itr = read_stata(fname, iterator=True)
    pos = 0
    for j in range(5):
        chunk = itr.read(chunksize, columns=columns)
        # read() returns None once the file is exhausted.
        if chunk is None:
            break
        from_frame = parsed.iloc[pos:pos + chunksize, :]
        tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
        pos += chunksize
# Allow running this test module directly under nose.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tools/tests/test_merge.py | 1 | 74672 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
from pandas.compat import range, lrange, lzip, zip
from pandas import compat, _np_version_under1p7
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal, rands,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range
import pandas.algos as algos
import pandas.util.testing as tm
# Shorthand for building ndarrays in the Cython join tests below.
a_ = np.array

# Default sample size and number of distinct groups for get_test_data.
N = 50
NGROUPS = 8

JOIN_TYPES = ['inner', 'outer', 'left', 'right']
def get_test_data(ngroups=NGROUPS, n=N):
    """Return a length-``n`` array of group labels 0..ngroups-1 in
    random order, with each label appearing as evenly as possible.

    Uses ``list(range(...))`` instead of ``pandas.compat.lrange`` —
    the Python 3 idiom with identical behavior.
    """
    unique_groups = list(range(ngroups))
    arr = np.asarray(np.tile(unique_groups, n // ngroups))
    if len(arr) < n:
        # Pad with a partial cycle when ngroups does not divide n.
        arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
    random.shuffle(arr)
    return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Build the shared frames used throughout the merge/join tests."""
    # aggregate multiple columns
    self.df = DataFrame({'key1': get_test_data(),
                         'key2': get_test_data(),
                         'data1': np.random.randn(N),
                         'data2': np.random.randn(N)})
    # exclude a couple keys for fun
    self.df = self.df[self.df['key2'] > 1]
    self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
                          'key2': get_test_data(ngroups=NGROUPS // 2,
                                                n=N // 5),
                          'value': np.random.randn(N // 5)})
    index, data = tm.getMixedTypeDict()
    self.target = DataFrame(data, index=index)
    # Join on string value
    self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
                            index=data['C'])
    self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                           'v1': np.random.randn(7)})
    self.right = DataFrame({'v2': np.random.randn(4)},
                           index=['d', 'b', 'c', 'a'])
def test_cython_left_outer_join(self):
    """algos.left_outer_join returns indexers matching a hand-computed
    mergesort-based reference."""
    left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
    right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
    max_group = 5
    ls, rs = algos.left_outer_join(left, right, max_group)
    exp_ls = left.argsort(kind='mergesort')
    exp_rs = right.argsort(kind='mergesort')
    # Hand-computed positions into the sorted key arrays; -1 marks a
    # left row with no match on the right.
    exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                 6, 6, 7, 7, 8, 8, 9, 10])
    exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                 4, 5, 4, 5, 4, 5, -1, -1])
    exp_ls = exp_ls.take(exp_li)
    exp_ls[exp_li == -1] = -1
    exp_rs = exp_rs.take(exp_ri)
    exp_rs[exp_ri == -1] = -1
    self.assert_(np.array_equal(ls, exp_ls))
    self.assert_(np.array_equal(rs, exp_rs))
def test_cython_right_outer_join(self):
    """A right outer join is left_outer_join with the operands swapped;
    verify the returned indexers against a hand-computed reference."""
    left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
    right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
    max_group = 5
    # Swap the arguments to get right-outer semantics.
    rs, ls = algos.left_outer_join(right, left, max_group)
    exp_ls = left.argsort(kind='mergesort')
    exp_rs = right.argsort(kind='mergesort')
    #            0        1        1        1
    exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
                 #            2        2        4
                 6, 7, 8, 6, 7, 8, -1])
    exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
                 4, 4, 4, 5, 5, 5, 6])
    exp_ls = exp_ls.take(exp_li)
    exp_ls[exp_li == -1] = -1
    exp_rs = exp_rs.take(exp_ri)
    exp_rs[exp_ri == -1] = -1
    self.assert_(np.array_equal(ls, exp_ls))
    self.assert_(np.array_equal(rs, exp_rs))
def test_cython_inner_join(self):
    """algos.inner_join returns indexers matching a hand-computed
    mergesort-based reference (unmatched keys dropped)."""
    left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
    right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
    max_group = 5
    ls, rs = algos.inner_join(left, right, max_group)
    exp_ls = left.argsort(kind='mergesort')
    exp_rs = right.argsort(kind='mergesort')
    # Hand-computed positions into the sorted key arrays.
    exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                 6, 6, 7, 7, 8, 8])
    exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                 4, 5, 4, 5, 4, 5])
    exp_ls = exp_ls.take(exp_li)
    exp_ls[exp_li == -1] = -1
    exp_rs = exp_rs.take(exp_ri)
    exp_rs[exp_ri == -1] = -1
    self.assert_(np.array_equal(ls, exp_ls))
    self.assert_(np.array_equal(rs, exp_rs))
def test_left_outer_join(self):
    """Validate single- and double-key merges via _check_join (left)."""
    joined_key2 = merge(self.df, self.df2, on='key2')
    _check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
    joined_both = merge(self.df, self.df2)
    _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
                how='left')
def test_right_outer_join(self):
    """Validate single- and double-key right joins via _check_join."""
    joined_key2 = merge(self.df, self.df2, on='key2', how='right')
    _check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
    joined_both = merge(self.df, self.df2, how='right')
    _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
                how='right')
def test_full_outer_join(self):
    """Validate single- and double-key outer joins via _check_join."""
    joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
    _check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
    joined_both = merge(self.df, self.df2, how='outer')
    _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
                how='outer')
def test_inner_join(self):
    """Validate single- and double-key inner joins via _check_join."""
    joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
    _check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
    joined_both = merge(self.df, self.df2, how='inner')
    _check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
                how='inner')
def test_handle_overlap(self):
    """Overlapping non-key columns get the caller-supplied suffixes."""
    joined = merge(self.df, self.df2, on='key2',
                   suffixes=['.foo', '.bar'])
    self.assert_('key1.foo' in joined)
    self.assert_('key1.bar' in joined)
def test_handle_overlap_arbitrary_key(self):
    """Suffixes are applied even when joining on differently named keys."""
    joined = merge(self.df, self.df2,
                   left_on='key2', right_on='key1',
                   suffixes=['.foo', '.bar'])
    self.assert_('key1.foo' in joined)
    self.assert_('key2.bar' in joined)
def test_merge_common(self):
    """Merging with no keys given defaults to the common columns."""
    implicit = merge(self.df, self.df2)
    explicit = merge(self.df, self.df2, on=['key1', 'key2'])
    tm.assert_frame_equal(implicit, explicit)
def test_join_on(self):
    """DataFrame.join(..., on=col) maps each row through the key column."""
    target = self.target
    source = self.source
    merged = target.join(source, on='C')
    self.assert_(np.array_equal(merged['MergedA'], target['A']))
    self.assert_(np.array_equal(merged['MergedD'], target['D']))
    # join with duplicates (fix regression from DataFrame/Matrix merge)
    df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
    df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
    joined = df.join(df2, on='key')
    expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
                          'value': [0, 0, 1, 1, 2]})
    assert_frame_equal(joined, expected)
    # Test when some are missing
    df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
                     columns=['one'])
    df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
                     columns=['two'])
    df_c = DataFrame([[1], [2]], index=[1, 2],
                     columns=['three'])
    joined = df_a.join(df_b, on='one')
    joined = joined.join(df_c, on='one')
    # Key 3 ('c') has no match; its values must be NaN.
    self.assert_(np.isnan(joined['two']['c']))
    self.assert_(np.isnan(joined['three']['c']))
    # merge column not present -> error
    self.assertRaises(Exception, target.join, source, on='E')
    # overlapping column without suffixes -> error
    source_copy = source.copy()
    source_copy['A'] = 0
    self.assertRaises(Exception, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
    """left_on with an incompatible right_index raises ValueError."""
    with tm.assertRaises(ValueError):
        df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
                        'b': np.random.randn(3)})
        # Right frame gets a two-level custom index.
        df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
                         'b': np.random.randn(10)},
                        index=tm.makeCustomIndex(10, 2))
        merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
    """right_on with an incompatible left_index raises ValueError."""
    with tm.assertRaises(ValueError):
        # Left frame gets a two-level custom index.
        df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
                        'b': np.random.randn(3)},
                       index=tm.makeCustomIndex(10, 2))
        df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
                         'b': np.random.randn(10)})
        merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
    """Mismatched numbers of left_on/right_on keys raise ValueError."""
    with tm.assertRaises(ValueError):
        df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
                        'b': np.random.randn(3)})
        df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
                         'b': np.random.randn(10)},
                        index=tm.makeCustomIndex(10, 2))
        # One right key vs two left keys.
        merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_pass_vector(self):
    """join accepts a Series of join keys in place of a column name."""
    expected = self.target.join(self.source, on='C')
    del expected['C']
    # Remove the key column and pass it as a vector instead.
    key_vector = self.target.pop('C')
    result = self.target.join(self.source, on=key_vector)
    assert_frame_equal(result, expected)
def test_join_with_len0(self):
    """Joining against an empty frame keeps the columns, all-NaN."""
    # nothing to merge
    merged = self.target.join(self.source.reindex([]), on='C')
    for col in self.source:
        self.assert_(col in merged)
        self.assert_(merged[col].isnull().all())
    # An inner join against an empty frame is empty but keeps columns.
    merged2 = self.target.join(self.source.reindex([]), on='C',
                               how='inner')
    self.assert_(merged2.columns.equals(merged.columns))
    self.assertEqual(len(merged2), 0)
def test_join_on_inner(self):
    """how='inner' equals the default join filtered to matched rows."""
    df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
    df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
    joined = df.join(df2, on='key', how='inner')
    expected = df.join(df2, on='key')
    expected = expected[expected['value'].notnull()]
    self.assert_(np.array_equal(joined['key'], expected['key']))
    self.assert_(np.array_equal(joined['value'], expected['value']))
    self.assert_(joined.index.equals(expected.index))
def test_join_on_singlekey_list(self):
    """A one-element key list behaves like a bare column name."""
    left = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
    right = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
    # corner case: on=['key'] must match on='key'
    joined_list = left.join(right, on=['key'])
    joined_scalar = left.join(right, on='key')
    assert_frame_equal(joined_list, joined_scalar)
def test_join_on_series(self):
    """Joining a Series matches joining a one-column DataFrame."""
    as_series = self.target.join(self.source['MergedA'], on='C')
    as_frame = self.target.join(self.source[['MergedA']], on='C')
    assert_frame_equal(as_series, as_frame)
def test_join_on_series_buglet(self):
    """Regression test: joining a named Series on a key column (GH #638)
    broadcasts the Series value to every matching row."""
    # GH #638
    df = DataFrame({'a': [1, 1]})
    ds = Series([2], index=[1], name='b')
    result = df.join(ds, on='a')
    expected = DataFrame({'a': [1, 1],
                          'b': [2, 2]}, index=df.index)
    tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
    """Index joins over mixed-dtype frames: suffix handling for fully
    overlapping columns, then all join types with disjoint columns,
    compared against the _join_by_hand reference implementation."""
    df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
                    index=np.arange(10),
                    columns=['A', 'B', 'C', 'D'])
    # sanity-check construction dtypes before joining
    self.assert_(df1['B'].dtype == np.int64)
    self.assert_(df1['D'].dtype == np.bool_)
    df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
                    index=np.arange(0, 10, 2),
                    columns=['A', 'B', 'C', 'D'])
    # overlap
    joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
    expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
                        'A_two', 'B_two', 'C_two', 'D_two']
    # rename originals to the suffixed names so _join_by_hand lines up
    df1.columns = expected_columns[:4]
    df2.columns = expected_columns[4:]
    expected = _join_by_hand(df1, df2)
    assert_frame_equal(joined, expected)
    # no overlapping blocks
    df1 = DataFrame(index=np.arange(10))
    df1['bool'] = True
    df1['string'] = 'foo'
    df2 = DataFrame(index=np.arange(5, 15))
    df2['int'] = 1
    df2['float'] = 1.
    # exercise every join type in both directions
    for kind in JOIN_TYPES:
        joined = df1.join(df2, how=kind)
        expected = _join_by_hand(df1, df2, how=kind)
        assert_frame_equal(joined, expected)
        joined = df2.join(df1, how=kind)
        expected = _join_by_hand(df2, df1, how=kind)
        assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
    """Smoke test: outer-joining onto an empty frame must not raise
    (regression from 0.4.3)."""
    # generated an exception in 0.4.3
    x = DataFrame()
    x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
    """Smoke test (GH #331): joining frames whose internal blocks are
    unconsolidated (column added after construction) must not raise."""
    # GH #331
    a = DataFrame(randn(30, 2), columns=['a', 'b'])
    c = Series(randn(30))
    a['c'] = c  # post-construction insert leaves 'a' unconsolidated
    d = DataFrame(randn(30, 1), columns=['q'])
    # it works!
    a.join(d)
    d.join(a)
def test_join_multiindex(self):
    """Outer join on partially-overlapping MultiIndexes: result must match
    a reindex-to-union reference and preserve index names, regardless of
    which level the inputs were sorted on."""
    index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
                                     [1, 2, 3, 1, 2, 3]],
                                    names=['first', 'second'])
    index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
                                     [1, 2, 3, 1, 2, 3]],
                                    names=['first', 'second'])
    df1 = DataFrame(data=np.random.randn(6), index=index1,
                    columns=['var X'])
    df2 = DataFrame(data=np.random.randn(6), index=index2,
                    columns=['var Y'])
    # case 1: inputs sorted on level 0
    df1 = df1.sortlevel(0)
    df2 = df2.sortlevel(0)
    joined = df1.join(df2, how='outer')
    # union of the two tuple indexes is the expected outer index
    ex_index = index1._tuple_index + index2._tuple_index
    expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
    expected.index.names = index1.names
    assert_frame_equal(joined, expected)
    self.assertEqual(joined.index.names, index1.names)
    # case 2: inputs sorted on level 1; re-sort the join result to compare
    df1 = df1.sortlevel(1)
    df2 = df2.sortlevel(1)
    joined = df1.join(df2, how='outer').sortlevel(0)
    ex_index = index1._tuple_index + index2._tuple_index
    expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
    expected.index.names = index1.names
    assert_frame_equal(joined, expected)
    self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
    """Inner join of key columns against a MultiIndexed frame should
    match the equivalent merge() formulations and yield a monotonic
    index."""
    key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
            'qux', 'snap']
    key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
            'three', 'one']
    data = np.random.randn(len(key1))
    data = DataFrame({'key1': key1, 'key2': key2,
                      'data': data})
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    to_join = DataFrame(np.random.randn(10, 3), index=index,
                        columns=['j_one', 'j_two', 'j_three'])
    joined = data.join(to_join, on=['key1', 'key2'], how='inner')
    # reference 1: merge key columns against the reset-index frame
    expected = merge(data, to_join.reset_index(),
                     left_on=['key1', 'key2'],
                     right_on=['first', 'second'], how='inner',
                     sort=False)
    # reference 2: index-on-key merge from the other side
    expected2 = merge(to_join, data,
                      right_on=['key1', 'key2'], left_index=True,
                      how='inner', sort=False)
    assert_frame_equal(joined, expected2.reindex_like(joined))
    expected2 = merge(to_join, data, right_on=['key1', 'key2'],
                      left_index=True, how='inner', sort=False)
    # drop the materialized index-level columns before comparing
    expected = expected.drop(['first', 'second'], axis=1)
    expected.index = joined.index
    self.assert_(joined.index.is_monotonic)
    assert_frame_equal(joined, expected)
    # _assert_same_contents(expected, expected2.ix[:, expected.columns])
def test_join_hierarchical_mixed(self):
    """Merging a frame with hierarchical (MultiIndex) columns, produced by
    a multi-agg groupby, against a flat-column frame must keep both the
    tuple column ('b', 'mean') and the flat column 'b'."""
    df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
    # agg with a list of funcs creates MultiIndex columns on 'b'
    new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
    other_df = DataFrame(
        [(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
    other_df.set_index('a', inplace=True)
    result = merge(new_df, other_df, left_index=True, right_index=True)
    self.assertTrue(('b', 'mean') in result)
    self.assertTrue('b' in result)
def test_join_float64_float32(self):
    """Joins/merges must preserve per-column dtypes: float32 columns stay
    float32, float64 stay float64, int64 keys stay int64."""
    a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype = np.float64)
    b = DataFrame(randn(10, 1), columns=['c'], dtype = np.float32)
    joined = a.join(b)
    self.assert_(joined.dtypes['a'] == 'float64')
    self.assert_(joined.dtypes['b'] == 'float64')
    self.assert_(joined.dtypes['c'] == 'float32')
    # same check through merge() with a key column
    a = np.random.randint(0, 5, 100).astype('int64')
    b = np.random.random(100).astype('float64')
    c = np.random.random(100).astype('float32')
    df = DataFrame({'a': a, 'b': b, 'c': c})
    xpdf = DataFrame({'a': a, 'b': b, 'c': c })
    s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
    rs = df.merge(s, left_on='a', right_index=True)
    self.assert_(rs.dtypes['a'] == 'int64')
    self.assert_(rs.dtypes['b'] == 'float64')
    self.assert_(rs.dtypes['c'] == 'float32')
    self.assert_(rs.dtypes['md'] == 'float32')
    xp = xpdf.merge(s, left_on='a', right_index=True)
    assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
    """Multi-frame join (list argument) on non-unique MultiIndexes must
    match chained pairwise merges for both outer and inner joins."""
    df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
    df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
    df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
    idf1 = df1.set_index(["a", "b"])
    idf2 = df2.set_index(["a", "b"])
    idf3 = df3.set_index(["a", "b"])
    result = idf1.join([idf2, idf3], how='outer')
    # reference: merge pairwise on the key columns
    df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
    expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
    result = result.reset_index()
    # outer join upcasts the int keys to float (NA introduction)
    result['a'] = result['a'].astype(np.float64)
    result['b'] = result['b'].astype(np.float64)
    assert_frame_equal(result, expected.ix[:, result.columns])
    # same with larger, differently-duplicated keys and an inner join
    df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
    df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
    df3 = DataFrame(
        {"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
    idf1 = df1.set_index(["a", "b"])
    idf2 = df2.set_index(["a", "b"])
    idf3 = df3.set_index(["a", "b"])
    result = idf1.join([idf2, idf3], how='inner')
    df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
    expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
    result = result.reset_index()
    assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_index_singlekey_right_vs_left(self):
    """left merge (key vs index) must mirror the right merge with the
    operands swapped, with and without sorting."""
    left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                      'v1': np.random.randn(7)})
    right = DataFrame({'v2': np.random.randn(4)},
                      index=['d', 'b', 'c', 'a'])
    merged1 = merge(left, right, left_on='key',
                    right_index=True, how='left', sort=False)
    merged2 = merge(right, left, right_on='key',
                    left_index=True, how='right', sort=False)
    # column order differs between the two; align before comparing
    assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
    merged1 = merge(left, right, left_on='key',
                    right_index=True, how='left', sort=True)
    merged2 = merge(right, left, right_on='key',
                    left_index=True, how='right', sort=True)
    assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
    """Inner merge of a key column against an index, in both operand
    orders, must match the on-key join restricted to the result rows."""
    left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                      'v1': np.random.randn(7)})
    right = DataFrame({'v2': np.random.randn(4)},
                      index=['d', 'b', 'c', 'a'])
    # inner join
    result = merge(left, right, left_on='key', right_index=True,
                   how='inner')
    expected = left.join(right, on='key').ix[result.index]
    assert_frame_equal(result, expected)
    # swapped operands: same rows, different column order
    result = merge(right, left, right_on='key', left_index=True,
                   how='inner')
    expected = left.join(right, on='key').ix[result.index]
    assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
    """Invalid key specifications must raise: one-sided index flags,
    on= combined with left_on=, and mismatched key list lengths."""
    self.assertRaises(Exception, merge, self.left, self.right,
                      left_index=True)
    self.assertRaises(Exception, merge, self.left, self.right,
                      right_index=True)
    self.assertRaises(Exception, merge, self.left, self.left,
                      left_on='key', on='key')
    self.assertRaises(Exception, merge, self.df, self.df2,
                      left_on=['key1'], right_on=['key1', 'key2'])
def test_merge_overlap(self):
    """Self-merge: row count is the sum of squared key frequencies, and
    the overlapping value column gets _x/_y suffixes."""
    merged = merge(self.left, self.left, on='key')
    # each key with count k contributes k*k rows to the self-merge
    exp_len = (self.left['key'].value_counts() ** 2).sum()
    self.assertEqual(len(merged), exp_len)
    self.assert_('v1_x' in merged)
    self.assert_('v1_y' in merged)
def test_merge_different_column_key_names(self):
    """Outer merge with distinct key column names keeps both key columns
    (lkey/rkey), NA-filling the side each unmatched row came from."""
    left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
                      'value': [1, 2, 3, 4]})
    right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
                       'value': [5, 6, 7, 8]})
    merged = left.merge(right, left_on='lkey', right_on='rkey',
                        how='outer', sort=True)
    # 'baz' only exists on the left, 'qux' only on the right
    assert_almost_equal(merged['lkey'],
                        ['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan])
    assert_almost_equal(merged['rkey'],
                        ['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'])
    assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan])
    assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7])
def test_merge_nocopy(self):
    """copy=False index-merge shares data with the inputs: mutating the
    merged frame is visible in the original left/right frames."""
    left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
    right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
    merged = merge(left, right, left_index=True,
                   right_index=True, copy=False)
    # writes through to `left` because the blocks are shared
    merged['a'] = 6
    self.assert_((left['a'] == 6).all())
    merged['d'] = 'peekaboo'
    self.assert_((right['d'] == 'peekaboo').all())
def test_join_sort(self):
    """sort=True on-key join orders rows by key (original positions kept
    in the index); sort=False preserves the left frame's row order."""
    left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
                      'value': [1, 2, 3, 4]})
    right = DataFrame({'value2': ['a', 'b', 'c']},
                      index=['bar', 'baz', 'foo'])
    joined = left.join(right, on='key', sort=True)
    expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
                          'value': [2, 3, 1, 4],
                          'value2': ['a', 'b', 'c', 'c']},
                         index=[1, 2, 0, 3])
    assert_frame_equal(joined, expected)
    # smoke test
    joined = left.join(right, on='key', sort=False)
    self.assert_(np.array_equal(joined.index, lrange(4)))
def test_intelligently_handle_join_key(self):
    """Outer merge on a shared key should return a consolidated frame
    with the expected key/value alignment (GH #733)."""
    # #733, be a bit more 1337 about not returning unconsolidated DataFrame
    left = DataFrame({'key': [1, 1, 2, 2, 3],
                      'value': lrange(5)}, columns=['value', 'key'])
    right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
                       'rvalue': lrange(6)})
    joined = merge(left, right, on='key', how='outer')
    expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],
                          'value': np.array([0, 0, 1, 1, 2, 3, 4,
                                             np.nan, np.nan]),
                          'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},
                         columns=['value', 'key', 'rvalue'])
    assert_frame_equal(joined, expected, check_dtype=False)
    # the merge result's internal blocks must already be consolidated
    self.assert_(joined._data.is_consolidated())
def test_handle_join_key_pass_array(self):
    """Merging on raw ndarray keys: passed arrays act like key columns,
    and anonymous keys surface in the result as 'key_0'."""
    left = DataFrame({'key': [1, 1, 2, 2, 3],
                      'value': lrange(5)}, columns=['value', 'key'])
    right = DataFrame({'rvalue': lrange(6)})
    key = np.array([1, 1, 2, 3, 4, 5])
    merged = merge(left, right, left_on='key', right_on=key, how='outer')
    merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
    assert_series_equal(merged['key'], merged2['key'])
    # outer merge on matching keys leaves no NA in the key column
    self.assert_(merged['key'].notnull().all())
    self.assert_(merged2['key'].notnull().all())
    # both sides keyed by arrays -> synthesized 'key_0' column
    left = DataFrame({'value': lrange(5)}, columns=['value'])
    right = DataFrame({'rvalue': lrange(6)})
    lkey = np.array([1, 1, 2, 2, 3])
    rkey = np.array([1, 1, 2, 3, 4, 5])
    merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
    self.assert_(np.array_equal(merged['key_0'],
                                np.array([1, 1, 1, 1, 2, 2, 3, 4, 5])))
    # index-vs-array merge also synthesizes 'key_0'
    left = DataFrame({'value': lrange(3)})
    right = DataFrame({'rvalue': lrange(6)})
    key = np.array([0, 1, 1, 2, 2, 3])
    merged = merge(left, right, left_index=True, right_on=key, how='outer')
    self.assert_(np.array_equal(merged['key_0'], key))
def test_mixed_type_join_with_suffix(self):
    """Smoke test (GH #916): joining mixed-dtype groupby results with a
    suffix must not raise."""
    # GH #916
    df = DataFrame(np.random.randn(20, 6),
                   columns=['a', 'b', 'c', 'd', 'e', 'f'])
    df.insert(0, 'id', 0)
    df.insert(5, 'dt', 'foo')  # non-numeric column forces mixed dtypes
    grouped = df.groupby('id')
    mn = grouped.mean()
    cn = grouped.count()
    # it works!
    mn.join(cn, rsuffix='_right')
def test_no_overlap_more_informative_error(self):
    """Merging frames with no common columns and no key spec must raise
    MergeError (rather than a generic failure)."""
    dt = datetime.now()
    df1 = DataFrame({'x': ['a']}, index=[dt])
    df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
    self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
    """Index joins with duplicate and non-monotonic datetime indexes must
    agree with the reset-index merge reference (_check_merge)."""
    dt = datetime(2012, 5, 1)
    dt2 = datetime(2012, 5, 2)
    dt3 = datetime(2012, 5, 3)
    dt4 = datetime(2012, 5, 4)
    df1 = DataFrame({'x': ['a']}, index=[dt])
    df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
    _check_merge(df1, df2)
    # Not monotonic
    df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
    df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
                    index=[dt3, dt3, dt2, dt2, dt, dt])
    _check_merge(df1, df2)
    # duplicates on both sides
    df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
    df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
    _check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
    """Many-to-many index join (duplicates on both sides) must agree with
    the reset-index merge reference (_check_merge)."""
    dt = datetime(2012, 5, 1)
    dt2 = datetime(2012, 5, 2)
    dt3 = datetime(2012, 5, 3)
    df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
                    index=[dt2, dt2, dt, dt])
    df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
                    index=[dt2, dt2, dt3, dt, dt])
    _check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
    """Left (and mirrored right) merge against an empty frame with only a
    key column must return the non-empty side unchanged."""
    left = DataFrame({'key': [1], 'value': [2]})
    right = DataFrame({'key': []})
    result = merge(left, right, on='key', how='left')
    assert_frame_equal(result, left)
    # right merge with swapped operands is the symmetric case
    result = merge(right, left, on='key', how='right')
    assert_frame_equal(result, left)
def test_merge_nosort(self):
    """sort=False merge must preserve the left frame's key order
    (GH #2098): the order of unique keys is unchanged by the merge."""
    # #2098, anything to do?
    from datetime import datetime
    d = {"var1": np.random.randint(0, 10, size=10),
         "var2": np.random.randint(0, 10, size=10),
         "var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
                  datetime(
                      2010, 2, 3), datetime(2012, 1, 12),
                  datetime(
                      2011, 2, 4), datetime(2012, 4, 3),
                  datetime(
                      2012, 3, 4), datetime(2008, 5, 1),
                  datetime(2010, 2, 3), datetime(2012, 2, 3)]}
    df = DataFrame.from_dict(d)
    var3 = df.var3.unique()
    var3.sort()
    new = DataFrame.from_dict({"var3": var3,
                               "var8": np.random.random(7)})
    result = df.merge(new, on="var3", sort=False)
    exp = merge(df, new, on='var3', sort=False)
    assert_frame_equal(result, exp)
    # key order in the result matches the original frame's key order
    self.assert_((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
    """On-key join where the right side only covers some keys: missing
    rows become NaN, with the expected dtype upcasts."""
    df1 = DataFrame({"i1" : [0, 1], "i2" : [0, 1]})
    df2 = DataFrame({"i1" : [0], "i3" : [0]})
    result = df1.join(df2, on="i1", rsuffix="_")
    expected = DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
                          'i1_': {0: 0, 1: np.nan}, 'i3': {0: 0.0, 1: np.nan},
                          None: {0: 0, 1: 0}}).set_index(None).reset_index()[['i1', 'i2', 'i1_', 'i3']]
    assert_frame_equal(result, expected, check_dtype=False)
    # float columns on the right side, same partial-match pattern
    df1 = DataFrame({"i1" : [0, 1], "i2" : [0.5, 1.5]})
    df2 = DataFrame({"i1" : [0], "i3" : [0.7]})
    result = df1.join(df2, rsuffix="_", on='i1')
    expected = DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
                          'i2': {0: 0.5, 1: 1.5}, 'i3': {0: 0.69999999999999996,
                                                         1: nan}})[['i1', 'i2', 'i1_', 'i3']]
    assert_frame_equal(result, expected)
def test_append_dtype_coerce(self):
    """Appending frames with different datetime columns must keep
    datetime64 dtype and fill the missing column with NaT (GH 4993)."""
    # GH 4993
    # appending with datetime will incorrectly convert datetime64
    import datetime as dt
    from pandas import NaT
    df1 = DataFrame(index=[1,2], data=[dt.datetime(2013,1,1,0,0),
                                       dt.datetime(2013,1,2,0,0)],
                    columns=['start_time'])
    df2 = DataFrame(index=[4,5], data=[[dt.datetime(2013,1,3,0,0),
                                        dt.datetime(2013,1,3,6,10)],
                                       [dt.datetime(2013,1,4,0,0),
                                        dt.datetime(2013,1,4,7,10)]],
                    columns=['start_time','end_time'])
    # df1 lacks 'end_time' -> its rows get NaT there after the append
    expected = concat([
        Series([NaT,NaT,dt.datetime(2013,1,3,6,10),dt.datetime(2013,1,4,7,10)],name='end_time'),
        Series([dt.datetime(2013,1,1,0,0),dt.datetime(2013,1,2,0,0),dt.datetime(2013,1,3,0,0),dt.datetime(2013,1,4,0,0)],name='start_time'),
    ],axis=1)
    result = df1.append(df2,ignore_index=True)
    assert_frame_equal(result, expected)
def test_join_append_timedeltas(self):
    """timedelta64 handling in append and left join (GH 5695).

    Appending dict rows containing datetime/timedelta values must
    preserve both dtypes; a left join with an unmatched row must fill
    the timedelta column with NaT.
    """
    # single import of each dependency; the original re-imported NaT a
    # second time mid-method for no reason
    import datetime as dt
    from pandas import NaT
    # timedelta64 issues with join/merge
    # GH 5695
    if _np_version_under1p7:
        raise nose.SkipTest("numpy < 1.7")
    d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
    df = DataFrame(columns=list('dt'))
    df = df.append(d, ignore_index=True)
    result = df.append(d, ignore_index=True)
    expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
                                dt.datetime(2013, 11, 5, 5, 56) ],
                          't': [ dt.timedelta(0, 22500),
                                 dt.timedelta(0, 22500) ]})
    assert_frame_equal(result, expected)
    # left join: row 'B' has no match on the right -> NaT in the
    # suffixed timedelta column
    td = np.timedelta64(300000000)
    lhs = DataFrame(Series([td,td],index=["A","B"]))
    rhs = DataFrame(Series([td],index=["A"]))
    result = lhs.join(rhs,rsuffix='r', how="left")
    expected = DataFrame({ '0' : Series([td,td],index=list('AB')), '0r' : Series([td,NaT],index=list('AB')) })
    assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
    """Merging frames that each contain internally-duplicated column
    names must raise (GH #2649)."""
    # #2649
    df = DataFrame({'key': [1, 2, 3],
                    'v1': [4, 5, 6],
                    'v2': [7, 8, 9]})
    df2 = DataFrame({'key': [1, 2, 3],
                     'v1': [4, 5, 6],
                     'v2': [7, 8, 9]})
    # force duplicate column labels within each frame
    df.columns = ['key', 'foo', 'foo']
    df2.columns = ['key', 'bar', 'bar']
    self.assertRaises(Exception, merge, df, df2)
def _check_merge(x, y):
    """Cross-check DataFrame.join against merge-on-reset-index.

    For inner/left/outer, ``x.join(y)`` must equal merging the two
    frames on their materialized 'index' columns (sorted) and restoring
    that column as the index.
    """
    join_kinds = ('inner', 'left', 'outer')
    for kind in join_kinds:
        joined = x.join(y, how=kind)
        reference = merge(x.reset_index(), y.reset_index(),
                          how=kind, sort=True)
        reference = reference.set_index('index')
        # TODO check_names on merge?
        assert_frame_equal(joined, reference, check_names=False)
class TestMergeMulti(tm.TestCase):
    """Tests for merging/joining on multiple keys and MultiIndexes."""

    def setUp(self):
        """Build a MultiIndexed frame plus a flat keyed frame whose keys
        partially match it (including a 'snap' key with no match)."""
        self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                        ['one', 'two', 'three']],
                                labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                        [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                                names=['first', 'second'])
        self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
                                 columns=['j_one', 'j_two', 'j_three'])
        # a little relevant example with NAs
        key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
                'qux', 'snap']
        key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
                'three', 'one']
        data = np.random.randn(len(key1))
        self.data = DataFrame({'key1': key1, 'key2': key2,
                               'data': data})

    def test_merge_on_multikey(self):
        """join on two key columns must equal a hand-built take() against
        the MultiIndex (NaN for unmatched keys)."""
        joined = self.data.join(self.to_join, on=['key1', 'key2'])
        join_key = Index(lzip(self.data['key1'], self.data['key2']))
        indexer = self.to_join.index.get_indexer(join_key)
        ex_values = self.to_join.values.take(indexer, axis=0)
        # -1 indexer entries are misses -> NaN rows
        ex_values[indexer == -1] = np.nan
        expected = self.data.join(DataFrame(ex_values,
                                            columns=self.to_join.columns))
        # TODO: columns aren't in the same order yet
        assert_frame_equal(joined, expected.ix[:, joined.columns])

    def test_merge_right_vs_left(self):
        """left merge on keys must mirror the right merge with operands
        swapped (multi-key variant)."""
        # compare left vs right merge with multikey
        merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
                                  right_index=True, how='left')
        merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
                                     left_index=True, how='right')
        merged2 = merged2.ix[:, merged1.columns]
        assert_frame_equal(merged1, merged2)

    def test_compress_group_combinations(self):
        """Smoke test: outer merge on a huge combinatorial key space must
        take the label-compression code path without failing."""
        # ~ 40000000 possible unique groups
        key1 = np.array([rands(10) for _ in range(10000)], dtype='O')
        key1 = np.tile(key1, 2)
        key2 = key1[::-1]
        df = DataFrame({'key1': key1, 'key2': key2,
                        'value1': np.random.randn(20000)})
        df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
                         'value2': np.random.randn(10000)})
        # just to hit the label compression code path
        merged = merge(df, df2, how='outer')

    def test_left_join_index_preserve_order(self):
        """Left join on multi-keys must keep the left frame's row order,
        including when the left frame has multiple dtype blocks."""
        left = DataFrame({'k1': [0, 1, 2] * 8,
                          'k2': ['foo', 'bar'] * 12,
                          'v': np.array(np.arange(24),dtype=np.int64) })
        index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
        right = DataFrame({'v2': [5, 7]}, index=index)
        result = left.join(right, on=['k1', 'k2'])
        expected = left.copy()
        expected['v2'] = np.nan
        expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
        expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
        tm.assert_frame_equal(result, expected)
        # test join with multi dtypes blocks
        left = DataFrame({'k1': [0, 1, 2] * 8,
                          'k2': ['foo', 'bar'] * 12,
                          'k3' : np.array([0, 1, 2]*8, dtype=np.float32),
                          'v': np.array(np.arange(24),dtype=np.int32) })
        index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
        right = DataFrame({'v2': [5, 7]}, index=index)
        result = left.join(right, on=['k1', 'k2'])
        expected = left.copy()
        expected['v2'] = np.nan
        expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
        expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
        tm.assert_frame_equal(result, expected)
        # do a right join for an extra test
        joined = merge(right, left, left_index=True,
                       right_on=['k1', 'k2'], how='right')
        tm.assert_frame_equal(joined.ix[:, expected.columns], expected)

    def test_join_multi_dtypes(self):
        """Left join with every int key dtype against every numeric value
        dtype: int value columns upcast to float64 due to NA fill."""
        # test with multi dtypes in the join index
        def _test(dtype1,dtype2):
            left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
                              'k2': ['foo', 'bar'] * 12,
                              'v': np.array(np.arange(24),dtype=np.int64) })
            index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
            right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)
            result = left.join(right, on=['k1', 'k2'])
            expected = left.copy()
            # int value column upcasts to float64 when NAs are introduced
            if dtype2.kind == 'i':
                dtype2 = np.dtype('float64')
            expected['v2'] = np.array(np.nan,dtype=dtype2)
            expected['v2'][(expected.k1 == 2) & (expected.k2 == 'bar')] = 5
            expected['v2'][(expected.k1 == 1) & (expected.k2 == 'foo')] = 7
            tm.assert_frame_equal(result, expected)
        for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]:
            for d2 in [np.int64,np.float64,np.float32,np.float16]:
                _test(np.dtype(d1),np.dtype(d2))

    def test_left_merge_na_buglet(self):
        """Left merge with NaN keys on the right: the right side matches
        row-for-row, so the merge equals a plain positional join."""
        left = DataFrame({'id': list('abcde'), 'v1': randn(5),
                          'v2': randn(5), 'dummy': list('abcde'),
                          'v3': randn(5)},
                         columns=['id', 'v1', 'v2', 'dummy', 'v3'])
        right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
                           'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
        merged = merge(left, right, on='id', how='left')
        rdf = right.drop(['id'], axis=1)
        expected = left.join(rdf)
        tm.assert_frame_equal(merged, expected)

    def test_merge_na_keys(self):
        """Outer merge with NaN key values must equal the merge done with
        NaN replaced by a sentinel and restored afterwards."""
        data = [[1950, "A", 1.5],
                [1950, "B", 1.5],
                [1955, "B", 1.5],
                [1960, "B", np.nan],
                [1970, "B", 4.],
                [1950, "C", 4.],
                [1960, "C", np.nan],
                [1965, "C", 3.],
                [1970, "C", 4.]]
        frame = DataFrame(data, columns=["year", "panel", "data"])
        other_data = [[1960, 'A', np.nan],
                      [1970, 'A', np.nan],
                      [1955, 'A', np.nan],
                      [1965, 'A', np.nan],
                      [1965, 'B', np.nan],
                      [1955, 'C', np.nan]]
        other = DataFrame(other_data, columns=['year', 'panel', 'data'])
        result = frame.merge(other, how='outer')
        # sentinel-substitute NaNs so they participate in key matching
        expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
        expected = expected.replace(-999, np.nan)
        tm.assert_frame_equal(result, expected)

    def test_int64_overflow_issues(self):
        """Smoke test (GH #2690): outer merge on many float keys must not
        blow up combinatorially; row count equals the two inputs summed."""
        # #2690, combinatorial explosion
        df1 = DataFrame(np.random.randn(1000, 7),
                        columns=list('ABCDEF') + ['G1'])
        df2 = DataFrame(np.random.randn(1000, 7),
                        columns=list('ABCDEF') + ['G2'])
        # it works!
        result = merge(df1, df2, how='outer')
        self.assertTrue(len(result) == 2000)
def _check_join(left, right, result, join_col, how='left',
                lsuffix='_x', rsuffix='_y'):
    """Validate a join `result` group-by-group against its inputs.

    For each key group in the result: the slice of left/right columns
    must exactly match the corresponding group in the source frame, or
    — if the key does not exist on that side — be entirely NA (only
    legal for join types that allow missing keys on that side).
    """
    # some smoke tests
    for c in join_col:
        assert(result[c].notnull().all())
    left_grouped = left.groupby(join_col)
    right_grouped = right.groupby(join_col)
    for group_key, group in result.groupby(join_col):
        # split the result row group back into its left/right halves
        l_joined = _restrict_to_columns(group, left.columns, lsuffix)
        r_joined = _restrict_to_columns(group, right.columns, rsuffix)
        try:
            lgroup = left_grouped.get_group(group_key)
        except KeyError:
            # key absent from left: only valid for right/outer joins,
            # and then the left-side columns must be all-NA
            if how in ('left', 'inner'):
                raise AssertionError('key %s should not have been in the join'
                                     % str(group_key))
            _assert_all_na(l_joined, left.columns, join_col)
        else:
            _assert_same_contents(l_joined, lgroup)
        try:
            rgroup = right_grouped.get_group(group_key)
        except KeyError:
            # symmetric check for keys absent from the right side
            if how in ('right', 'inner'):
                raise AssertionError('key %s should not have been in the join'
                                     % str(group_key))
            _assert_all_na(r_joined, right.columns, join_col)
        else:
            _assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
    """Slice `group` down to `columns`, accepting suffixed variants.

    Keeps any column that is in `columns` directly or whose name,
    with `suffix` stripped, is in `columns`; removes the suffix from
    the kept names and reorders them to match `columns`.
    """
    keep = [name for name in group.columns
            if name in columns or name.replace(suffix, '') in columns]
    # filter down to the matching columns
    restricted = group.ix[:, keep]
    # drop the suffix from every surviving column name
    restricted = restricted.rename(columns=lambda c: c.replace(suffix, ''))
    # put in the right order...
    return restricted.ix[:, columns]
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
    """Reference join: align both frames to the joined index, then copy
    b's columns into a's reindexed frame, preserving column order."""
    combined_index = a.index.join(b.index, how=how)
    left_aligned = a.reindex(combined_index)
    right_aligned = b.reindex(combined_index)
    all_columns = a.columns.append(b.columns)
    for name, values in compat.iteritems(right_aligned):
        left_aligned[name] = values
    return left_aligned.reindex(columns=all_columns)
class TestConcatenate(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Build a plain float frame and a mixed-dtype copy (extra object
    column 'foo') used by the concat/append tests."""
    self.frame = DataFrame(tm.getSeriesData())
    self.mixed_frame = self.frame.copy()
    self.mixed_frame['foo'] = 'bar'
def test_append(self):
    """DataFrame.append basics: reassembling split frames, partial
    columns, mixed dtypes, empty operands, verify_integrity, and
    appending a Series with a new column (GH 6129)."""
    begin_index = self.frame.index[:5]
    end_index = self.frame.index[5:]
    begin_frame = self.frame.reindex(begin_index)
    end_frame = self.frame.reindex(end_index)
    # append of the two halves reconstitutes the original
    appended = begin_frame.append(end_frame)
    assert_almost_equal(appended['A'], self.frame['A'])
    # appending with a missing column still keeps it (NA-filled)
    del end_frame['A']
    partial_appended = begin_frame.append(end_frame)
    self.assert_('A' in partial_appended)
    partial_appended = end_frame.append(begin_frame)
    self.assert_('A' in partial_appended)
    # mixed type handling
    appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
    assert_frame_equal(appended, self.mixed_frame)
    # what to test here
    mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
    mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
    # all equal except 'foo' column
    assert_frame_equal(
        mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
        mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
    # append empty
    empty = DataFrame({})
    appended = self.frame.append(empty)
    assert_frame_equal(self.frame, appended)
    # result must be a new object even when nothing was appended
    self.assert_(appended is not self.frame)
    appended = empty.append(self.frame)
    assert_frame_equal(self.frame, appended)
    self.assert_(appended is not self.frame)
    # overlap
    self.assertRaises(ValueError, self.frame.append, self.frame,
                      verify_integrity=True)
    # new columns
    # GH 6129
    df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
    row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
    expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {'x': 3, 'y': 4, 'z': 6}, 'c' : {'z' : 7}})
    result = df.append(row)
    assert_frame_equal(result, expected)
def test_append_length0_frame(self):
    """Appending to a zero-row frame keeps the union of columns with the
    appended frame's rows."""
    df = DataFrame(columns=['A', 'B', 'C'])
    df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
    df5 = df.append(df3)
    expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
    assert_frame_equal(df5, expected)
def test_append_records(self):
    """Appending frames built from numpy structured arrays must equal the
    frame built from the concatenated record arrays."""
    arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
    arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
    arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
    arr2[:] = [(3, 4., 'foo'),
               (5, 6., "bar"),
               (7., 8., 'baz')]
    df1 = DataFrame(arr1)
    df2 = DataFrame(arr2)
    result = df1.append(df2, ignore_index=True)
    expected = DataFrame(np.concatenate((arr1, arr2)))
    assert_frame_equal(result, expected)
def test_append_different_columns(self):
    """Appending frames with partially-disjoint columns NA-fills each
    side's missing columns."""
    df = DataFrame({'bools': np.random.randn(10) > 0,
                    'ints': np.random.randint(0, 10, 10),
                    'floats': np.random.randn(10),
                    'strings': ['foo', 'bar'] * 5})
    a = df[:5].ix[:, ['bools', 'ints', 'floats']]
    b = df[5:].ix[:, ['strings', 'ints', 'floats']]
    appended = a.append(b)
    # 'strings' missing from a's rows, 'bools' missing from b's rows
    self.assert_(isnull(appended['strings'][0:4]).all())
    self.assert_(isnull(appended['bools'][5:]).all())
def test_append_many(self):
    """Appending a list of chunks reassembles the original frame; a new
    column in the last chunk is NA for the earlier chunks' rows."""
    chunks = [self.frame[:5], self.frame[5:10],
              self.frame[10:15], self.frame[15:]]
    result = chunks[0].append(chunks[1:])
    tm.assert_frame_equal(result, self.frame)
    # add a column to just the final chunk
    chunks[-1]['foo'] = 'bar'
    result = chunks[0].append(chunks[1:])
    tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)
    self.assert_((result['foo'][15:] == 'bar').all())
    self.assert_(result['foo'][:15].isnull().all())
def test_append_preserve_index_name(self):
    """Appending must keep the index name ('A') of the operands
    (GH #980)."""
    # #980
    df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
    df1 = df1.set_index(['A'])
    df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
                    columns=['A', 'B', 'C'])
    df2 = df2.set_index(['A'])
    result = df1.append(df2)
    self.assert_(result.index.name == 'A')
def test_join_many(self):
    """Joining a list of frames: same-index pieces reassemble the
    original; differing indexes follow the requested join type; on= with
    a list of frames is rejected."""
    df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
    df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
    joined = df_list[0].join(df_list[1:])
    tm.assert_frame_equal(joined, df)
    # pieces with staggered (different) indexes
    df_list = [df[['a', 'b']][:-2],
               df[['c', 'd']][2:], df[['e', 'f']][1:9]]

    def _check_diff_index(df_list, result, exp_index):
        # reference: reindex all pieces to the expected index first
        reindexed = [x.reindex(exp_index) for x in df_list]
        expected = reindexed[0].join(reindexed[1:])
        tm.assert_frame_equal(result, expected)

    # different join types
    joined = df_list[0].join(df_list[1:], how='outer')
    _check_diff_index(df_list, joined, df.index)
    joined = df_list[0].join(df_list[1:])
    _check_diff_index(df_list, joined, df_list[0].index)
    joined = df_list[0].join(df_list[1:], how='inner')
    _check_diff_index(df_list, joined, df.index[2:8])
    # on= is not supported when joining a list of frames
    self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
    """Joining a list of mixed-dtype column slices reassembles the
    original frame."""
    df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
    df['key'] = ['foo', 'bar'] * 4
    df1 = df.ix[:, ['A', 'B']]
    df2 = df.ix[:, ['C', 'D']]
    df3 = df.ix[:, ['key']]
    result = df1.join([df2, df3])
    assert_frame_equal(result, df)
def test_append_missing_column_proper_upcast(self):
    """NA introduction by append upcasts dtypes correctly: int64 -> f8,
    bool -> object."""
    df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
    df2 = DataFrame({'B': np.array([True, False, True, False],
                                   dtype=bool)})
    appended = df1.append(df2, ignore_index=True)
    self.assert_(appended['A'].dtype == 'f8')
    self.assert_(appended['B'].dtype == 'O')
def test_concat_with_group_keys(self):
    """concat with keys= builds a MultiIndex level from the keys, on the
    index (axis=0) or the columns (axis=1)."""
    df = DataFrame(np.random.randn(4, 3))
    df2 = DataFrame(np.random.randn(4, 4))
    # axis=0
    df = DataFrame(np.random.randn(3, 4))
    df2 = DataFrame(np.random.randn(4, 4))
    result = concat([df, df2], keys=[0, 1])
    exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
                                        [0, 1, 2, 0, 1, 2, 3]])
    expected = DataFrame(np.r_[df.values, df2.values],
                         index=exp_index)
    tm.assert_frame_equal(result, expected)
    # same frame twice: symmetric inner level
    result = concat([df, df], keys=[0, 1])
    exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
                                         [0, 1, 2, 0, 1, 2]])
    expected = DataFrame(np.r_[df.values, df.values],
                         index=exp_index2)
    tm.assert_frame_equal(result, expected)
    # axis=1
    df = DataFrame(np.random.randn(4, 3))
    df2 = DataFrame(np.random.randn(4, 4))
    result = concat([df, df2], keys=[0, 1], axis=1)
    expected = DataFrame(np.c_[df.values, df2.values],
                         columns=exp_index)
    tm.assert_frame_equal(result, expected)
    result = concat([df, df], keys=[0, 1], axis=1)
    expected = DataFrame(np.c_[df.values, df.values],
                         columns=exp_index2)
    tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
    """concat with explicit levels= keeps the full (superset) level and
    applies the given level name."""
    df = DataFrame(np.random.randn(10, 4))
    pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]
    # level is a superset of the keys actually used
    level = ['three', 'two', 'one', 'zero']
    result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
                    levels=[level],
                    names=['group_key'])
    self.assert_(np.array_equal(result.columns.levels[0], level))
    self.assertEqual(result.columns.names[0], 'group_key')
def test_concat_dataframe_keys_bug(self):
    """axis=1 concat with keys over named-index frames yields tuple
    columns (key, column)."""
    t1 = DataFrame({'value': Series([1, 2, 3],
                                    index=Index(['a', 'b', 'c'], name='id'))})
    t2 = DataFrame({'value': Series([7, 8],
                                    index=Index(['a', 'b'], name='id'))})
    # it works
    result = concat([t1, t2], axis=1, keys=['t1', 't2'])
    self.assertEqual(list(result.columns), [('t1', 'value'),
                                            ('t2', 'value')])
def test_concat_dict(self):
    """concat of a dict equals concat of a list with sorted dict keys as
    keys=, on both axes; explicit keys= selects/orders entries."""
    frames = {'foo': DataFrame(np.random.randn(4, 3)),
              'bar': DataFrame(np.random.randn(4, 3)),
              'baz': DataFrame(np.random.randn(4, 3)),
              'qux': DataFrame(np.random.randn(4, 3))}
    sorted_keys = sorted(frames)
    result = concat(frames)
    expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
    tm.assert_frame_equal(result, expected)
    result = concat(frames, axis=1)
    expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
                      axis=1)
    tm.assert_frame_equal(result, expected)
    # explicit keys override the sorted-key default
    keys = ['baz', 'foo', 'bar']
    result = concat(frames, keys=keys)
    expected = concat([frames[k] for k in keys], keys=keys)
    tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
    # ignore_index=True on axis=1 drops the column labels; rows are
    # aligned on the union of the (differing) indexes, sorted lexically.
    frame1 = DataFrame({"test1": ["a", "b", "c"],
                        "test2": [1, 2, 3],
                        "test3": [4.5, 3.2, 1.2]})
    frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
    frame1.index = Index(["x", "y", "z"])
    frame2.index = Index(["x", "y", "q"])
    v1 = concat([frame1, frame2], axis=1, ignore_index=True)
    nan = np.nan
    expected = DataFrame([[nan, nan, nan, 4.3],
                          ['a', 1, 4.5, 5.2],
                          ['b', 2, 3.2, 2.2],
                          ['c', 3, 1.2, nan]],
                         index=Index(["q", "x", "y", "z"]))
    tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
    # Keys prepend a new outer level named 'iteration' to an existing
    # two-level MultiIndex, giving three index levels in total.
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    frame = DataFrame(np.random.randn(10, 3), index=index,
                      columns=Index(['A', 'B', 'C'], name='exp'))
    result = concat([frame, frame], keys=[0, 1], names=['iteration'])
    self.assertEqual(result.index.names, ('iteration',) + index.names)
    # selecting either key recovers the original frame unchanged
    tm.assert_frame_equal(result.ix[0], frame)
    tm.assert_frame_equal(result.ix[1], frame)
    self.assertEqual(result.index.nlevels, 3)
def test_concat_keys_and_levels(self):
    # Tuple keys index into user-supplied `levels`; the result gains
    # those levels plus the frames' own (default integer) index level.
    df = DataFrame(np.random.randn(1, 3))
    df2 = DataFrame(np.random.randn(1, 4))
    levels = [['foo', 'baz'], ['one', 'two']]
    names = ['first', 'second']
    result = concat([df, df2, df, df2],
                    keys=[('foo', 'one'), ('foo', 'two'),
                          ('baz', 'one'), ('baz', 'two')],
                    levels=levels,
                    names=names)
    expected = concat([df, df2, df, df2])
    exp_index = MultiIndex(levels=levels + [[0]],
                           labels=[[0, 0, 1, 1], [0, 1, 0, 1],
                                   [0, 0, 0, 0]],
                           names=names + [None])
    expected.index = exp_index
    assert_frame_equal(result, expected)
    # no names
    result = concat([df, df2, df, df2],
                    keys=[('foo', 'one'), ('foo', 'two'),
                          ('baz', 'one'), ('baz', 'two')],
                    levels=levels)
    self.assertEqual(result.index.names, (None,) * 3)
    # no levels
    result = concat([df, df2, df, df2],
                    keys=[('foo', 'one'), ('foo', 'two'),
                          ('baz', 'one'), ('baz', 'two')],
                    names=['first', 'second'])
    self.assertEqual(result.index.names, ('first', 'second') + (None,))
    # without explicit levels the level values end up sorted lexically
    self.assert_(np.array_equal(result.index.levels[0], ['baz', 'foo']))
def test_concat_keys_levels_no_overlap(self):
    # GH #1406: keys absent from the user-specified levels must raise.
    first = DataFrame(np.random.randn(1, 3), index=['a'])
    second = DataFrame(np.random.randn(1, 4), index=['b'])
    for pieces in ([first, first], [first, second]):
        self.assertRaises(ValueError, concat, pieces,
                          keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
    # Explicit `names` must override the inner frames' own index names.
    a = DataFrame(np.random.rand(3, 3),
                  columns=list('ABC'),
                  index=Index(list('abc'), name='index_a'))
    b = DataFrame(np.random.rand(3, 3),
                  columns=list('ABC'),
                  index=Index(list('abc'), name='index_b'))
    result = concat([a, b], keys=['key0', 'key1'],
                    names=['lvl0', 'lvl1'])
    # build the expectation by renaming the inner level after the fact
    exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
    new_names = list(exp.index.names)
    new_names[1] = 'lvl1'
    exp.index.set_names(new_names, inplace=True)
    tm.assert_frame_equal(result, exp)
    self.assertEqual(result.index.names, exp.index.names)
def test_crossed_dtypes_weird_corner(self):
    # Appending frames whose columns have complementary i8/f8 dtypes must
    # upcast per-column without scrambling the values.
    columns = ['A', 'B', 'C', 'D']
    df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
                     'B': np.array([1, 2, 3, 4], dtype='i8'),
                     'C': np.array([1, 2, 3, 4], dtype='f8'),
                     'D': np.array([1, 2, 3, 4], dtype='i8')},
                    columns=columns)
    df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
                     'B': np.array([1, 2, 3, 4], dtype='f8'),
                     'C': np.array([1, 2, 3, 4], dtype='i8'),
                     'D': np.array([1, 2, 3, 4], dtype='f8')},
                    columns=columns)
    appended = df1.append(df2, ignore_index=True)
    expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
                         columns=columns)
    tm.assert_frame_equal(appended, expected)
    # keys/names still propagate when the pieces have string indexes
    df = DataFrame(np.random.randn(1, 3), index=['a'])
    df2 = DataFrame(np.random.randn(1, 4), index=['b'])
    result = concat(
        [df, df2], keys=['one', 'two'], names=['first', 'second'])
    self.assertEqual(result.index.names, ('first', 'second'))
def test_dups_index(self):
    # GH 4771: concat/append must preserve duplicate column labels.
    # single dtypes
    df = DataFrame(np.random.randint(0, 10, size=40).reshape(10, 4),
                   columns=['A', 'A', 'C', 'C'])
    result = concat([df, df], axis=1)
    assert_frame_equal(result.iloc[:, :4], df)
    assert_frame_equal(result.iloc[:, 4:], df)
    result = concat([df, df], axis=0)
    assert_frame_equal(result.iloc[:10], df)
    assert_frame_equal(result.iloc[10:], df)
    # multi dtypes
    df = concat([DataFrame(np.random.randn(10, 4),
                           columns=['A', 'A', 'B', 'B']),
                 DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
                           columns=['A', 'C'])],
                axis=1)
    result = concat([df, df], axis=1)
    assert_frame_equal(result.iloc[:, :6], df)
    assert_frame_equal(result.iloc[:, 6:], df)
    result = concat([df, df], axis=0)
    assert_frame_equal(result.iloc[:10], df)
    assert_frame_equal(result.iloc[10:], df)
    # append
    result = df.iloc[0:8, :].append(df.iloc[8:])
    assert_frame_equal(result, df)
    result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
    assert_frame_equal(result, df)
    expected = concat([df, df], axis=0)
    result = df.append(df)
    assert_frame_equal(result, expected)
def test_join_dups(self):
    # joining frames that contain duplicate column labels
    df = concat([DataFrame(np.random.randn(10, 4),
                           columns=['A', 'A', 'B', 'B']),
                 DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
                           columns=['A', 'C'])],
                axis=1)
    expected = concat([df, df], axis=1)
    result = df.join(df, rsuffix='_2')
    # the join mangles dup names with the suffix; realign before comparing
    result.columns = expected.columns
    assert_frame_equal(result, expected)
    # GH 4975, invalid join on dups
    w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
    x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
    y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
    z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
    dta = x.merge(y, left_index=True, right_index=True).merge(
        z, left_index=True, right_index=True, how="outer")
    dta = dta.merge(w, left_index=True, right_index=True)
    expected = concat([x, y, z, w], axis=1)
    expected.columns = ['x_x', 'y_x', 'x_y', 'y_y',
                        'x_x', 'y_x', 'x_y', 'y_y']
    assert_frame_equal(dta, expected)
def test_handle_empty_objects(self):
    # Zero-length frames in the input should be dropped by concat.
    df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
    baz = df[:5]
    baz['foo'] = 'bar'
    empty = df[5:5]
    concatted = concat([baz, empty, empty, df[5:]], axis=0)
    expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
    expected['foo'] = expected['foo'].astype('O')
    expected['foo'][:5] = 'bar'
    tm.assert_frame_equal(concatted, expected)
def test_panel_join(self):
    # Exercises all four join types on partially-overlapping panels.
    panel = tm.makePanel()
    tm.add_nans(panel)
    p1 = panel.ix[:2, :10, :3]
    p2 = panel.ix[2:, 5:, 2:]
    # left join
    result = p1.join(p2)
    expected = p1.copy()
    expected['ItemC'] = p2['ItemC']
    tm.assert_panel_equal(result, expected)
    # right join
    result = p1.join(p2, how='right')
    expected = p2.copy()
    expected['ItemA'] = p1['ItemA']
    expected['ItemB'] = p1['ItemB']
    expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
    tm.assert_panel_equal(result, expected)
    # inner join
    result = p1.join(p2, how='inner')
    expected = panel.ix[:, 5:10, 2:3]
    tm.assert_panel_equal(result, expected)
    # outer join
    result = p1.join(p2, how='outer')
    expected = p1.reindex(major=panel.major_axis,
                          minor=panel.minor_axis)
    expected = expected.join(p2.reindex(major=panel.major_axis,
                                        minor=panel.minor_axis))
    tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
    # Overlapping items get suffixes; non-overlapping ones pass through.
    panel = tm.makePanel()
    tm.add_nans(panel)
    left = panel.ix[['ItemA', 'ItemB', 'ItemC']]
    right = panel.ix[['ItemB', 'ItemC']]
    joined = left.join(right, lsuffix='_p1', rsuffix='_p2')
    left_suf = left.ix[['ItemB', 'ItemC']].add_suffix('_p1')
    right_suf = right.ix[['ItemB', 'ItemC']].add_suffix('_p2')
    no_overlap = panel.ix[['ItemA']]
    expected = left_suf.join(right_suf).join(no_overlap)
    tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
    # tm.K is a module-level global controlling how many items makePanel
    # generates; it must be restored to 4 right after use.
    tm.K = 10
    panel = tm.makePanel()
    tm.K = 4
    panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
    joined = panels[0].join(panels[1:])
    tm.assert_panel_equal(joined, panel)
    panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
    data_dict = {}
    for p in panels:
        data_dict.update(compat.iteritems(p))
    joined = panels[0].join(panels[1:], how='inner')
    expected = Panel.from_dict(data_dict, intersect=True)
    tm.assert_panel_equal(joined, expected)
    joined = panels[0].join(panels[1:], how='outer')
    expected = Panel.from_dict(data_dict, intersect=False)
    tm.assert_panel_equal(joined, expected)
    # edge cases: suffixes are only valid for two-panel joins, and
    # multi-panel joins only support 'left'/'inner'/'outer'
    self.assertRaises(ValueError, panels[0].join, panels[1:],
                      how='outer', lsuffix='foo', rsuffix='bar')
    self.assertRaises(ValueError, panels[0].join, panels[1:],
                      how='right')
def test_panel_concat_other_axes(self):
    # Concat along axis 1 (major) and axis 2 (minor) of a Panel.
    panel = tm.makePanel()
    p1 = panel.ix[:, :5, :]
    p2 = panel.ix[:, 5:, :]
    result = concat([p1, p2], axis=1)
    tm.assert_panel_equal(result, panel)
    p1 = panel.ix[:, :, :2]
    p2 = panel.ix[:, :, 2:]
    result = concat([p1, p2], axis=2)
    tm.assert_panel_equal(result, panel)
    # if things are a bit misbehaved
    p1 = panel.ix[:2, :, :2]
    p2 = panel.ix[:, :, 2:]
    p1['ItemC'] = 'baz'
    result = concat([p1, p2], axis=2)
    expected = panel.copy()
    # p1's ItemC holds strings, so the combined block becomes object dtype
    expected['ItemC'] = expected['ItemC'].astype('O')
    expected.ix['ItemC', :, :2] = 'baz'
    tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
    # #2257
    def make_panel():
        index = 5
        cols = 3

        def df():
            return DataFrame(np.random.randn(index, cols),
                             index=["I%s" % i for i in range(index)],
                             columns=["C%s" % i for i in range(cols)])
        return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))

    panel1 = make_panel()
    panel2 = make_panel()
    # suffix every major-axis label of panel2 with '_1', so panel3's axes
    # are disjoint from panel1's
    panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
                                      for x in panel2.major_axis]),
                                axis=1)
    panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
    panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
    # it works!
    concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
    # Concatenating complementary slices along an axis restores the panel.
    p4d = tm.makePanel4D()
    halves = [p4d.ix[:, :, :5, :], p4d.ix[:, :, 5:, :]]
    tm.assert_panel4d_equal(concat(halves, axis=2), p4d)
    halves = [p4d.ix[:, :, :, :2], p4d.ix[:, :, :, 2:]]
    tm.assert_panel4d_equal(concat(halves, axis=3), p4d)
def test_panel4d_concat_mixed_type(self):
    p4d = tm.makePanel4D()
    # if things are a bit misbehaved
    p1 = p4d.ix[:, :2, :, :2]
    p2 = p4d.ix[:, :, :, 2:]
    p1['L5'] = 'baz'
    result = concat([p1, p2], axis=3)
    # order matters here: p2 gains 'L5' only AFTER `result` is computed,
    # so `result` got its NaNs via alignment while `expected` has them
    # explicitly
    p2['L5'] = np.nan
    expected = concat([p1, p2], axis=3)
    expected = expected.ix[result.labels]
    tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
    # Concatenating consecutive slices restores the series, name included.
    ts = tm.makeTimeSeries()
    ts.name = 'foo'
    pieces = [ts[:5], ts[5:15], ts[15:]]
    result = concat(pieces)
    tm.assert_series_equal(result, ts)
    self.assertEqual(result.name, ts.name)
    result = concat(pieces, keys=[0, 1, 2])
    expected = ts.copy()
    # NOTE: ts is re-indexed AFTER `expected` was copied, so only the
    # MultiIndex level built below sees the DatetimeIndex conversion
    ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
    exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
                  np.arange(len(ts))]
    exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
                           labels=exp_labels)
    expected.index = exp_index
    tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
    ts = tm.makeTimeSeries()
    pieces = [ts[:-2], ts[2:], ts[2:-2]]
    result = concat(pieces, axis=1)
    expected = DataFrame(pieces).T
    assert_frame_equal(result, expected)
    result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
    expected = DataFrame(pieces, index=['A', 'B', 'C']).T
    assert_frame_equal(result, expected)
    # preserve series names, #2489
    s = Series(randn(5), name='A')
    s2 = Series(randn(5), name='B')
    result = concat([s, s2], axis=1)
    expected = DataFrame({'A': s, 'B': s2})
    assert_frame_equal(result, expected)
    # an unnamed series forces a fallback to integer column labels
    s2.name = None
    result = concat([s, s2], axis=1)
    self.assertTrue(np.array_equal(result.columns, lrange(2)))
    # must reindex, #2603
    s = Series(randn(3), index=['c', 'a', 'b'], name='A')
    s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
    result = concat([s, s2], axis=1)
    expected = DataFrame({'A': s, 'B': s2})
    assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
    # Even a single frame gets a hierarchical index when a key is given.
    frame = DataFrame(np.random.randn(10, 4))
    keyed = concat([frame], keys=['foo'])
    doubled = concat([frame, frame], keys=['foo', 'bar'])
    tm.assert_frame_equal(keyed, doubled[:10])
def test_concat_exclude_none(self):
    # None entries are silently dropped; an all-None input must raise.
    frame = DataFrame(np.random.randn(10, 4))
    result = concat([frame[:5], None, None, frame[5:]])
    tm.assert_frame_equal(result, frame)
    self.assertRaises(Exception, concat, [None, None])
def test_concat_datetime64_block(self):
    # datetime64 blocks must survive a vertical concat intact.
    from pandas.tseries.index import date_range
    rng = date_range('1/1/2000', periods=10)
    frame = DataFrame({'time': rng})
    stacked = concat([frame, frame])
    self.assert_((stacked.iloc[:10]['time'] == rng).all())
    self.assert_((stacked.iloc[10:]['time'] == rng).all())
def test_concat_timedelta64_block(self):
    # timedelta64 support requires numpy >= 1.7
    if _np_version_under1p7:
        raise nose.SkipTest("numpy < 1.7")
    from pandas import to_timedelta
    rng = to_timedelta(np.arange(10), unit='s')
    frame = DataFrame({'time': rng})
    stacked = concat([frame, frame])
    self.assert_((stacked.iloc[:10]['time'] == rng).all())
    self.assert_((stacked.iloc[10:]['time'] == rng).all())
def test_concat_keys_with_none(self):
    # GH #1649: keys whose values are None are dropped from the result.
    df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
    with_none = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
    without = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
    tm.assert_frame_equal(with_none, without)
    with_none = concat([None, df0, df0[:2], df0[:1], df0],
                       keys=['a', 'b', 'c', 'd', 'e'])
    without = concat([df0, df0[:2], df0[:1], df0],
                     keys=['b', 'c', 'd', 'e'])
    tm.assert_frame_equal(with_none, without)
def test_concat_bug_1719(self):
    # Outer-joining two series of different lengths must be symmetric:
    # both orderings produce the same (union) number of rows.
    ts1 = tm.makeTimeSeries()
    ts2 = tm.makeTimeSeries()[::2]
    left = concat([ts1, ts2], join='outer', axis=1)
    right = concat([ts2, ts1], join='outer', axis=1)
    self.assertEqual(len(left), len(right))
def test_concat_bug_2972(self):
    # Two series sharing a name keep duplicate column labels on axis=1.
    ts0 = Series(np.zeros(5))
    ts1 = Series(np.ones(5))
    ts0.name = ts1.name = 'same name'
    result = concat([ts0, ts1], axis=1)
    expected = DataFrame({0: ts0, 1: ts1})
    expected.columns = ['same name', 'same name']
    assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
    # GH 3602, duplicate columns
    df1 = DataFrame({'firmNo': [0, 0, 0, 0],
                     'stringvar': ['rrr', 'rrr', 'rrr', 'rrr'],
                     'prc': [6, 6, 6, 6]})
    df2 = DataFrame({'misc': [1, 2, 3, 4], 'prc': [6, 6, 6, 6],
                     'C': [9, 10, 11, 12]})
    expected = DataFrame([[0, 6, 'rrr', 9, 1, 6],
                          [0, 6, 'rrr', 10, 2, 6],
                          [0, 6, 'rrr', 11, 3, 6],
                          [0, 6, 'rrr', 12, 4, 6]])
    # 'prc' appears twice: once from each input frame
    expected.columns = ['firmNo', 'prc', 'stringvar', 'C', 'misc', 'prc']
    result = concat([df1, df2], axis=1)
    assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
    # ignore_index must discard identical series names on axis=1.
    dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
    first = Series(randn(len(dates)), index=dates, name='value')
    second = Series(randn(len(dates)), index=dates, name='value')
    combined = concat([first, second], axis=1, ignore_index=True)
    self.assertTrue(np.array_equal(combined.columns, [0, 1]))
def test_concat_invalid_first_argument(self):
    # Passing frames positionally (not wrapped in a list) is rejected...
    df1 = mkdf(10, 2)
    df2 = mkdf(10, 2)
    self.assertRaises(AssertionError, concat, df1, df2)
    # ...but any iterable of frames, such as a generator, is accepted
    concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
def test_concat_mixed_types_fails(self):
    # Mixing a Series and a DataFrame on axis=1 raises either way round.
    df = DataFrame(randn(10, 1))
    for pieces in ([df[0], df], [df, df[0]]):
        with tm.assertRaisesRegexp(TypeError, "Cannot concatenate.+"):
            concat(pieces, axis=1)
class TestOrderedMerge(tm.TestCase):
    # Tests for ordered_merge: an outer merge that keeps the key column
    # sorted and optionally forward-fills missing values.

    def setUp(self):
        self.left = DataFrame({'key': ['a', 'c', 'e'],
                               'lvalue': [1, 2., 3]})
        self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
                                'rvalue': [1, 2, 3., 4]})
    # GH #813

    def test_basic(self):
        result = ordered_merge(self.left, self.right, on='key')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1, nan, 2, nan, 3, nan],
                              'rvalue': [nan, 1, 2, 3, nan, 4]})
        assert_frame_equal(result, expected)

    def test_ffill(self):
        result = ordered_merge(
            self.left, self.right, on='key', fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1., 1, 2, 2, 3, 3.],
                              'rvalue': [nan, 1, 2, 3, 3, 4]})
        assert_frame_equal(result, expected)

    def test_multigroup(self):
        # left_by repeats the merge once per group value in `left`
        left = concat([self.left, self.left], ignore_index=True)
        # right = concat([self.right, self.right], ignore_index=True)
        left['group'] = ['a'] * 3 + ['b'] * 3
        # right['group'] = ['a'] * 4 + ['b'] * 4
        result = ordered_merge(left, self.right, on='key', left_by='group',
                               fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
                              'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
                              'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
        expected['group'] = ['a'] * 6 + ['b'] * 6
        assert_frame_equal(result, expected.ix[:, result.columns])
        # swapping left/right with right_by yields the same data
        result2 = ordered_merge(self.right, left, on='key', right_by='group',
                                fill_method='ffill')
        assert_frame_equal(result, result2.ix[:, result.columns])
        result = ordered_merge(left, self.right, on='key', left_by='group')
        self.assert_(result['group'].notnull().all())
# Allow running this test module directly through nose.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
ML-KULeuven/socceraction | socceraction/spadl/opta.py | 1 | 60410 | # -*- coding: utf-8 -*-
"""Opta event stream data to SPADL converter."""
import copy
import glob
import json # type: ignore
import os
import re
import warnings
from abc import ABC
from datetime import datetime, timedelta
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type
import pandas as pd # type: ignore
import pandera as pa
import unidecode # type: ignore
from lxml import objectify
from pandera.typing import DataFrame, DateTime, Series
from . import config as spadlconfig
from .base import (
CompetitionSchema,
EventDataLoader,
EventSchema,
GameSchema,
MissingDataError,
PlayerSchema,
TeamSchema,
_add_dribbles,
_fix_clearances,
_fix_direction_of_play,
)
__all__ = [
'OptaLoader',
'convert_to_actions',
'OptaCompetitionSchema',
'OptaGameSchema',
'OptaPlayerSchema',
'OptaTeamSchema',
'OptaEventSchema',
]
class OptaCompetitionSchema(CompetitionSchema):
    """Schema of a dataframe listing the available competitions and seasons."""
class OptaGameSchema(GameSchema):
    """Definition of a dataframe containing a list of games."""

    # Venue identifier; nullable because not every feed provides it.
    venue: Series[str] = pa.Field(nullable=True)
    referee_id: Series[int] = pa.Field(nullable=True)
    attendance: Series[int] = pa.Field(nullable=True)
    # Taken from the feed's 'match_time' stat (see _F9JSONParser.extract_games).
    duration: Series[int]
    home_score: Series[int]
    away_score: Series[int]
class OptaPlayerSchema(PlayerSchema):
    """Definition of a dataframe containing the list of players of a game."""
    # NOTE: the docstring previously said "list of teams" — it was swapped
    # with OptaTeamSchema's; the fields below are clearly per-player.

    firstname: Optional[Series[str]]
    lastname: Optional[Series[str]]
    nickname: Optional[Series[str]] = pa.Field(nullable=True)
    starting_position_id: Series[int]
    starting_position_name: Series[str]
    height: Optional[Series[float]]
    weight: Optional[Series[float]]
    age: Optional[Series[int]]
class OptaTeamSchema(TeamSchema):
    """Definition of a dataframe containing the list of teams of a game."""
    # NOTE: the docstring previously said "list of players" — it was swapped
    # with OptaPlayerSchema's; this schema extends TeamSchema.
class OptaEventSchema(EventSchema):
    """Definition of a dataframe containing event stream data of a game."""

    timestamp: Series[DateTime]
    minute: Series[int]
    # Seconds within the minute, hence constrained to 0-59.
    second: Series[int] = pa.Field(ge=0, le=59)
    outcome: Series[bool]
    # Event coordinates; nullable — presumably not every event carries a
    # location (TODO confirm against the parsers).
    start_x: Series[float] = pa.Field(nullable=True)
    start_y: Series[float] = pa.Field(nullable=True)
    end_x: Series[float] = pa.Field(nullable=True)
    end_y: Series[float] = pa.Field(nullable=True)
    assist: Series[bool] = pa.Field(nullable=True)
    keypass: Series[bool] = pa.Field(nullable=True)
    # Raw Opta qualifiers attached to the event (arbitrary Python object).
    qualifiers: Series[object]
def _deepupdate(target: Dict[Any, Any], src: Dict[Any, Any]) -> None:
"""Deep update target dict with src.
For each k,v in src: if k doesn't exist in target, it is deep copied from
src to target. Otherwise, if v is a list, target[k] is extended with
src[k]. If v is a set, target[k] is updated with v, If v is a dict,
recursively deep-update it.
Examples
--------
>>> t = {'name': 'Ferry', 'hobbies': ['programming', 'sci-fi']}
>>> deepupdate(t, {'hobbies': ['gaming']})
>>> print(t)
{'name': 'Ferry', 'hobbies': ['programming', 'sci-fi', 'gaming']}
"""
for k, v in src.items():
if isinstance(v, list):
if k not in target:
target[k] = copy.deepcopy(v)
else:
target[k].extend(v)
elif isinstance(v, dict):
if k not in target:
target[k] = copy.deepcopy(v)
else:
_deepupdate(target[k], v)
elif isinstance(v, set):
if k not in target:
target[k] = v.copy()
else:
target[k].update(v.copy())
else:
target[k] = copy.copy(v)
def _extract_ids_from_path(path: str, pattern: str) -> Dict[str, int]:
regex = re.compile(
'.+?'
+ re.escape(pattern)
.replace(r'\{competition_id\}', r'(?P<competition_id>\d+)')
.replace(r'\{season_id\}', r'(?P<season_id>\d+)')
.replace(r'\{game_id\}', r'(?P<game_id>\d+)')
)
m = re.match(regex, path)
if m is None:
raise ValueError('The filepath {} does not match the format {}.'.format(path, pattern))
ids = m.groupdict()
return {k: int(v) for k, v in ids.items()}
class OptaParser(ABC):
    """Extract data from an Opta data stream.

    Parameters
    ----------
    path : str
        Path of the data file.
    """

    def __init__(self, path: str, *args: Any, **kwargs: Any):
        pass

    def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
        """Return a (possibly empty) mapping of competitions."""
        return {}

    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return a (possibly empty) mapping of games."""
        return {}

    def extract_teams(self) -> Dict[int, Dict[str, Any]]:
        """Return a (possibly empty) mapping of teams."""
        return {}

    def extract_players(self) -> Dict[int, Dict[str, Any]]:
        """Return a (possibly empty) mapping of players."""
        return {}

    def extract_events(self) -> Dict[int, Dict[str, Any]]:
        """Return a (possibly empty) mapping of events."""
        return {}
class OptaLoader(EventDataLoader):
    """
    Load Opta data from a local folder.

    Parameters
    ----------
    root : str
        Root-path of the data.
    feeds : dict
        Glob pattern for each feed that should be parsed. For example::

            {
                'f7': "f7-{competition_id}-{season_id}-{game_id}.xml",
                'f24': "f24-{competition_id}-{season_id}-{game_id}.xml"
            }

        If you use JSON files obtained from `WhoScored <whoscored.com>`__ use::

            {
                'whoscored': "{competition_id}-{season_id}/{game_id}.json",
            }

    parser : str or dict
        Either 'xml', 'json', 'whoscored' or your custom parser for each feed.
        The default xml parser supports F7 and F24 feeds; the default json
        parser supports F1, F9 and F24 feeds. Custom parsers can be specified
        as::

            {
                'feed1_name': Feed1Parser
                'feed2_name': Feed2Parser
            }

        where Feed1Parser and Feed2Parser are classes implementing
        :class:`~socceraction.spadl.opta.OptaParser` and 'feed1_name' and
        'feed2_name' correspond to the keys in 'feeds'.
    """

    def __init__(self, root: str, feeds: Dict[str, str], parser: Mapping[str, Type[OptaParser]]):
        # NOTE(review): `parser` may also be the string 'json', 'xml' or
        # 'whoscored' (handled below); the annotation only covers the
        # custom-mapping case — TODO widen to a Union.
        self.root = root
        if parser == 'json':
            self.parsers = self._get_parsers_for_feeds(_jsonparsers, feeds)
        elif parser == 'xml':
            self.parsers = self._get_parsers_for_feeds(_xmlparsers, feeds)
        elif parser == 'whoscored':
            self.parsers = self._get_parsers_for_feeds(_whoscoredparsers, feeds)
        else:
            self.parsers = self._get_parsers_for_feeds(parser, feeds)
        self.feeds = feeds

    def _get_parsers_for_feeds(
        self, available_parsers: Mapping[str, Type[OptaParser]], feeds: Dict[str, str]
    ) -> Mapping[str, Type[OptaParser]]:
        """Select the appropriate parser for each feed.

        Parameters
        ----------
        available_parsers : dict(str, OptaParser)
            Dictionary with all available parsers.
        feeds : dict(str, str)
            All feeds that should be parsed.

        Returns
        -------
        dict(str, OptaParser)
            A mapping between all feeds that should be parsed and the
            corresponding parser class.

        Warns
        -----
        Raises a warning if there is no parser available for any of the
        provided feeds.
        """
        parsers = {}
        for feed in feeds:
            if feed in available_parsers:
                parsers[feed] = available_parsers[feed]
            else:
                warnings.warn(
                    'No parser available for {} feeds. This feed is ignored.'.format(feed)
                )
        return parsers

    def competitions(self) -> DataFrame[OptaCompetitionSchema]:
        """Return a dataframe with all available competitions and seasons.

        Returns
        -------
        pd.DataFrame
            A dataframe containing all available competitions and seasons. See
            :class:`~socceraction.spadl.opta.OptaCompetitionSchema` for the schema.
        """
        data: Dict[int, Dict[str, Any]] = {}
        for feed, feed_pattern in self.feeds.items():
            # wildcard every placeholder: scan all files of this feed
            glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id='*')
            feed_files = glob.glob(os.path.join(self.root, glob_pattern))
            for ffp in feed_files:
                ids = _extract_ids_from_path(ffp, feed_pattern)
                parser = self.parsers[feed](ffp, **ids)
                # merge per-file results; later feeds enrich earlier ones
                _deepupdate(data, parser.extract_competitions())
        return pd.DataFrame(list(data.values()))

    def games(self, competition_id: int, season_id: int) -> DataFrame[OptaGameSchema]:
        """Return a dataframe with all available games in a season.

        Parameters
        ----------
        competition_id : int
            The ID of the competition.
        season_id : int
            The ID of the season.

        Returns
        -------
        pd.DataFrame
            A dataframe containing all available games. See
            :class:`~socceraction.spadl.opta.OptaGameSchema` for the schema.
        """
        data: Dict[int, Dict[str, Any]] = {}
        for feed, feed_pattern in self.feeds.items():
            glob_pattern = feed_pattern.format(
                competition_id=competition_id, season_id=season_id, game_id='*'
            )
            feed_files = glob.glob(os.path.join(self.root, glob_pattern))
            for ffp in feed_files:
                try:
                    ids = _extract_ids_from_path(ffp, feed_pattern)
                    parser = self.parsers[feed](ffp, **ids)
                    _deepupdate(data, parser.extract_games())
                except Exception:
                    # NOTE(review): broad catch — any malformed file is
                    # skipped with only a warning
                    warnings.warn('Could not parse {}'.format(ffp))
        return pd.DataFrame(list(data.values()))

    def teams(self, game_id: int) -> DataFrame[OptaTeamSchema]:
        """Return a dataframe with both teams that participated in a game.

        Parameters
        ----------
        game_id : int
            The ID of the game.

        Returns
        -------
        pd.DataFrame
            A dataframe containing both teams. See
            :class:`~socceraction.spadl.opta.OptaTeamSchema` for the schema.
        """
        data: Dict[int, Dict[str, Any]] = {}
        for feed, feed_pattern in self.feeds.items():
            glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
            feed_files = glob.glob(os.path.join(self.root, glob_pattern))
            for ffp in feed_files:
                ids = _extract_ids_from_path(ffp, feed_pattern)
                parser = self.parsers[feed](ffp, **ids)
                _deepupdate(data, parser.extract_teams())
        return pd.DataFrame(list(data.values()))

    def players(self, game_id: int) -> DataFrame[OptaPlayerSchema]:
        """Return a dataframe with all players that participated in a game.

        Parameters
        ----------
        game_id : int
            The ID of the game.

        Returns
        -------
        pd.DataFrame
            A dataframe containing all players. See
            :class:`~socceraction.spadl.opta.OptaPlayerSchema` for the schema.
        """
        data: Dict[int, Dict[str, Any]] = {}
        for feed, feed_pattern in self.feeds.items():
            glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
            feed_files = glob.glob(os.path.join(self.root, glob_pattern))
            for ffp in feed_files:
                ids = _extract_ids_from_path(ffp, feed_pattern)
                parser = self.parsers[feed](ffp, **ids)
                _deepupdate(data, parser.extract_players())
        df_players = pd.DataFrame(list(data.values()))
        df_players['game_id'] = game_id
        return df_players

    def events(self, game_id: int) -> DataFrame[OptaEventSchema]:
        """Return a dataframe with the event stream of a game.

        Parameters
        ----------
        game_id : int
            The ID of the game.

        Returns
        -------
        pd.DataFrame
            A dataframe containing the event stream. See
            :class:`~socceraction.spadl.opta.OptaEventSchema` for the schema.
        """
        data: Dict[int, Dict[str, Any]] = {}
        for feed, feed_pattern in self.feeds.items():
            glob_pattern = feed_pattern.format(competition_id='*', season_id='*', game_id=game_id)
            feed_files = glob.glob(os.path.join(self.root, glob_pattern))
            for ffp in feed_files:
                ids = _extract_ids_from_path(ffp, feed_pattern)
                parser = self.parsers[feed](ffp, **ids)
                _deepupdate(data, parser.extract_events())
        events = (
            pd.DataFrame(list(data.values()))
            # attach the human-readable name for each Opta type_id
            .merge(_eventtypesdf, on='type_id', how='left')
            .sort_values(['game_id', 'period_id', 'minute', 'second', 'timestamp'])
            .reset_index(drop=True)
        )
        return events
class OptaJSONParser(OptaParser):
    """Extract data from an Opta JSON data stream.

    Parameters
    ----------
    path : str
        Path of the data file.
    """

    def __init__(self, path: str, *args: Any, **kwargs: Any):
        # Read the whole document eagerly; parsers index into self.root.
        with open(path, 'rt', encoding='utf-8') as stream:
            self.root = json.loads(stream.read())
class OptaXMLParser(OptaParser):
    """Extract data from an Opta XML data stream.

    Parameters
    ----------
    path : str
        Path of the data file.
    """

    def __init__(self, path: str, *args: Any, **kwargs: Any):
        # Read as bytes so lxml handles the XML-declared encoding itself.
        with open(path, 'rb') as stream:
            raw = stream.read()
        self.root = objectify.fromstring(raw)
class _F1JSONParser(OptaJSONParser):
    # Parser for Opta F1 feeds (competition calendar) in JSON form.

    def get_feed(self) -> Dict[str, Any]:
        # Locate the node that carries the OptaFeed payload.
        for node in self.root:
            if 'OptaFeed' in node['data'].keys():
                return node
        raise MissingDataError

    def get_doc(self) -> Dict[str, Any]:
        f1 = self.get_feed()
        data = assertget(f1, 'data')
        optafeed = assertget(data, 'OptaFeed')
        optadocument = assertget(optafeed, 'OptaDocument')
        return optadocument

    def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
        optadocument = self.get_doc()
        attr = assertget(optadocument, '@attributes')
        competition_id = int(assertget(attr, 'competition_id'))
        competition = dict(
            season_id=int(assertget(attr, 'season_id')),
            # no season name in this feed; fall back to the numeric ID
            season_name=str(assertget(attr, 'season_id')),
            competition_id=competition_id,
            competition_name=assertget(attr, 'competition_name'),
        )
        return {competition_id: competition}

    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        optadocument = self.get_doc()
        attr = assertget(optadocument, '@attributes')
        matchdata = assertget(optadocument, 'MatchData')
        matches = {}
        for match in matchdata:
            match_dict: Dict[str, Any] = {}
            match_dict['competition_id'] = int(assertget(attr, 'competition_id'))
            match_dict['season_id'] = int(assertget(attr, 'season_id'))
            matchattr = assertget(match, '@attributes')
            # uIDs carry a one-letter type prefix; strip it to get the int ID
            match_dict['game_id'] = int(assertget(matchattr, 'uID')[1:])
            matchinfo = assertget(match, 'MatchInfo')
            matchinfoattr = assertget(matchinfo, '@attributes')
            match_dict['game_day'] = int(assertget(matchinfoattr, 'MatchDay'))
            match_dict['venue'] = str(assertget(matchinfoattr, 'Venue_id'))
            match_dict['game_date'] = datetime.strptime(
                assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'
            )
            teamdata = assertget(match, 'TeamData')
            for team in teamdata:
                teamattr = assertget(team, '@attributes')
                side = assertget(teamattr, 'Side')
                teamid = assertget(teamattr, 'TeamRef')
                if side == 'Home':
                    match_dict['home_team_id'] = int(teamid[1:])
                else:
                    match_dict['away_team_id'] = int(teamid[1:])
            matches[match_dict['game_id']] = match_dict
        return matches
class _F9JSONParser(OptaJSONParser):
def get_feed(self) -> Dict[str, Any]:
    """Return the first node of the stream that contains an OptaFeed."""
    feeds = (node for node in self.root if 'OptaFeed' in node['data'].keys())
    try:
        return next(feeds)
    except StopIteration:
        raise MissingDataError
def get_doc(self) -> Dict[str, Any]:
    """Return the first OptaDocument element of the F9 feed."""
    feed = self.get_feed()
    optafeed = assertget(assertget(feed, 'data'), 'OptaFeed')
    return assertget(optafeed, 'OptaDocument')[0]
def extract_games(self) -> Dict[int, Dict[str, Any]]:
    # Build a {game_id: metadata} mapping from the F9 match document.
    optadocument = self.get_doc()
    attr = assertget(optadocument, '@attributes')
    venue = assertget(optadocument, 'Venue')
    matchdata = assertget(optadocument, 'MatchData')
    matchofficial = assertget(matchdata, 'MatchOfficial')
    matchinfo = assertget(matchdata, 'MatchInfo')
    stat = assertget(matchdata, 'Stat')
    assert stat['@attributes']['Type'] == 'match_time'
    teamdata = assertget(matchdata, 'TeamData')
    scores = {}
    for t in teamdata:
        scores[t['@attributes']['Side']] = t['@attributes']['Score']
    # the 'uID' attribute has a one-letter type prefix; strip it
    game_id = int(assertget(attr, 'uID')[1:])
    game_dict = {
        game_id: dict(
            game_id=game_id,
            venue=str(
                venue['@attributes']['uID']
            ),  # The venue's name is not included in this stream
            referee_id=int(matchofficial['@attributes']['uID'].replace('o', '')),
            game_date=datetime.strptime(
                assertget(matchinfo, 'Date'), '%Y%m%dT%H%M%S%z'
            ).replace(tzinfo=None),  # stored as naive local time
            attendance=int(matchinfo.get('Attendance', 0)),
            duration=int(stat['@value']),
            home_score=int(scores['Home']),
            away_score=int(scores['Away']),
        )
    }
    return game_dict
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
    # Build a {team_id: {team_id, team_name}} mapping from <Team> nodes.
    optadocument = self.get_doc()
    root_teams = assertget(optadocument, 'Team')
    teams = {}
    for team in root_teams:
        if 'id' in team.keys():
            nameobj = team.get('nameObj')
            team_id = int(team['id'])
            # rebinds the loop variable to the output record
            team = dict(
                team_id=team_id,
                team_name=nameobj.get('name'),
            )
            # strip accents so names are plain ASCII
            for f in ['team_name']:
                team[f] = unidecode.unidecode(team[f]) if f in team else team[f]
            teams[team_id] = team
    return teams
def extract_players(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
root_teams = assertget(optadocument, 'Team')
lineups = self.extract_lineups()
players = {}
for team in root_teams:
team_id = int(team['@attributes']['uID'].replace('t', ''))
for player in team['Player']:
player_id = int(player['@attributes']['uID'].replace('p', ''))
assert 'nameObj' in player['PersonName']
nameobj = player['PersonName']['nameObj']
if not nameobj.get('is_unknown'):
player = dict(
team_id=team_id,
player_id=player_id,
firstname=nameobj.get('first').strip() or None,
lastname=nameobj.get('last').strip() or None,
player_name=nameobj.get('full').strip() or None,
nickname=nameobj.get('known') or nameobj.get('full').strip() or None,
)
if player_id in lineups[team_id]['players']:
player = dict(
**player,
jersey_number=lineups[team_id]['players'][player_id]['jersey_number'],
starting_position_name=lineups[team_id]['players'][player_id][
'starting_position_name'
],
starting_position_id=lineups[team_id]['players'][player_id][
'starting_position_id'
],
is_starter=lineups[team_id]['players'][player_id]['is_starter'],
minutes_played=lineups[team_id]['players'][player_id][
'minutes_played'
],
)
for f in ['firstname', 'lastname', 'player_name', 'nickname']:
if player[f]:
player[f] = unidecode.unidecode(player[f])
players[player_id] = player
return players
def extract_referee(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
try:
rootf9 = optadocument['MatchData']['MatchOfficial']
except KeyError:
raise MissingDataError
name = rootf9['OfficialName']
nameobj = name['nameObj']
referee_id = int(rootf9['@attributes']['uID'].replace('o', ''))
referee = dict(
referee_id=referee_id,
referee_firstname=name.get('First') or nameobj.get('first'),
referee_lastname=name.get('Last') or nameobj.get('last'),
)
for f in ['referee_firstname', 'referee_lastname']:
if referee[f]:
referee[f] = unidecode.unidecode(referee[f])
return {referee_id: referee}
def extract_teamgamestats(self) -> List[Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
game_id = int(assertget(attr, 'uID')[1:])
try:
rootf9 = optadocument['MatchData']['TeamData']
except KeyError:
raise MissingDataError
teams_gamestats = []
for team in rootf9:
attr = team['@attributes']
statsdict = {stat['@attributes']['Type']: stat['@value'] for stat in team['Stat']}
team_gamestats = dict(
game_id=game_id,
team_id=int(attr['TeamRef'].replace('t', '')),
side=attr['Side'],
score=attr['Score'],
shootout_score=attr['ShootOutScore'],
**statsdict,
)
teams_gamestats.append(team_gamestats)
return teams_gamestats
def extract_lineups(self) -> Dict[int, Dict[str, Any]]:
optadocument = self.get_doc()
attr = assertget(optadocument, '@attributes')
try:
rootf9 = optadocument['MatchData']['TeamData']
except KeyError:
raise MissingDataError
matchstats = optadocument['MatchData']['Stat']
matchstats = [matchstats] if isinstance(matchstats, dict) else matchstats
matchstatsdict = {stat['@attributes']['Type']: stat['@value'] for stat in matchstats}
lineups: Dict[int, Dict[str, Any]] = {}
for team in rootf9:
# lineup attributes
team_id = int(team['@attributes']['TeamRef'].replace('t', ''))
lineups[team_id] = dict(players=dict())
# substitutes
subst = [s['@attributes'] for s in team['Substitution']]
for player in team['PlayerLineUp']['MatchPlayer']:
attr = player['@attributes']
player_id = int(attr['PlayerRef'].replace('p', ''))
playerstatsdict = {
stat['@attributes']['Type']: stat['@value'] for stat in player['Stat']
}
sub_on = next(
(
item['Time']
for item in subst
if 'Retired' not in item and item['SubOn'] == f'p{player_id}'
),
matchstatsdict['match_time'] if attr['Status'] == 'Sub' else 0,
)
sub_off = next(
(item['Time'] for item in subst if item['SubOff'] == f'p{player_id}'),
matchstatsdict['match_time'],
)
minutes_played = sub_off - sub_on
lineups[team_id]['players'][player_id] = dict(
jersey_number=attr['ShirtNumber'],
starting_position_name=attr['Position'],
starting_position_id=attr['position_id'],
is_starter=attr['Status'] == 'Start',
minutes_played=minutes_played,
**playerstatsdict,
)
return lineups
class _F24JSONParser(OptaJSONParser):
    """Extract games and events from an Opta F24 (event detail) JSON feed."""
    def get_feed(self) -> Dict[str, Any]:
        # The F24 payload is the node exposing a 'Games' key.
        for node in self.root:
            if 'Games' in node['data'].keys():
                return node
        raise MissingDataError
    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return {game_id: game attributes} for the single game in this feed."""
        f24 = self.get_feed()
        data = assertget(f24, 'data')
        games = assertget(data, 'Games')
        game = assertget(games, 'Game')
        attr = assertget(game, '@attributes')
        game_id = int(assertget(attr, 'id'))
        game_dict = {
            game_id: dict(
                competition_id=int(assertget(attr, 'competition_id')),
                game_id=game_id,
                season_id=int(assertget(attr, 'season_id')),
                game_day=int(assertget(attr, 'matchday')),
                home_team_id=int(assertget(attr, 'home_team_id')),
                away_team_id=int(assertget(attr, 'away_team_id')),
            )
        }
        return game_dict
    def extract_events(self) -> Dict[int, Dict[str, Any]]:
        """Return {event_id: event attributes} for every event of the game."""
        f24 = self.get_feed()
        data = assertget(f24, 'data')
        games = assertget(data, 'Games')
        game = assertget(games, 'Game')
        game_attr = assertget(game, '@attributes')
        game_id = int(assertget(game_attr, 'id'))
        events = {}
        for element in assertget(game, 'Event'):
            attr = element['@attributes']
            # NOTE(review): if 'TimeStamp' is absent, timestamp is None and the
            # strptime call below raises TypeError — confirm all feeds include it.
            timestamp = attr['TimeStamp'].get('locale') if attr.get('TimeStamp') else None
            timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
            # Qualifiers are keyed by their Opta qualifier id.
            qualifiers = {
                int(q['@attributes']['qualifier_id']): q['@attributes']['value']
                for q in element.get('Q', [])
            }
            start_x = float(assertget(attr, 'x'))
            start_y = float(assertget(attr, 'y'))
            end_x = _get_end_x(qualifiers)
            end_y = _get_end_y(qualifiers)
            # Events without an explicit end location end where they started.
            if end_x is None:
                end_x = start_x
            if end_y is None:
                end_y = start_y
            event_id = int(assertget(attr, 'event_id'))
            event = dict(
                game_id=game_id,
                event_id=event_id,
                type_id=int(assertget(attr, 'type_id')),
                period_id=int(assertget(attr, 'period_id')),
                minute=int(assertget(attr, 'min')),
                second=int(assertget(attr, 'sec')),
                timestamp=timestamp,
                player_id=int(assertget(attr, 'player_id')),
                team_id=int(assertget(attr, 'team_id')),
                outcome=bool(int(attr.get('outcome', 1))),
                start_x=start_x,
                start_y=start_y,
                end_x=end_x,
                end_y=end_y,
                assist=bool(int(attr.get('assist', 0))),
                keypass=bool(int(attr.get('keypass', 0))),
                qualifiers=qualifiers,
            )
            events[event_id] = event
        return events
class _F7XMLParser(OptaXMLParser):
    """Extract match metadata from an Opta F7 (match results) XML feed."""
    def get_doc(self) -> Type[objectify.ObjectifiedElement]:
        # The SoccerDocument element is the root of all F7 content.
        optadocument = self.root.find('SoccerDocument')
        return optadocument
    def extract_competitions(self) -> Dict[int, Dict[str, Any]]:
        """Return {competition_id: competition attributes}."""
        optadocument = self.get_doc()
        competition = optadocument.Competition
        stats = {}
        for stat in competition.find('Stat'):
            stats[stat.attrib['Type']] = stat.text
        # uID carries a one-letter type prefix; strip it to get the numeric id.
        competition_id = int(competition.attrib['uID'][1:])
        competition_dict = dict(
            competition_id=competition_id,
            season_id=int(assertget(stats, 'season_id')),
            season_name=assertget(stats, 'season_name'),
            competition_name=competition.Name.text,
        )
        return {competition_id: competition_dict}
    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return {game_id: game attributes} for the single game in this feed."""
        optadocument = self.get_doc()
        match_info = optadocument.MatchData.MatchInfo
        game_id = int(optadocument.attrib['uID'][1:])
        stats = {}
        for stat in optadocument.MatchData.find('Stat'):
            stats[stat.attrib['Type']] = stat.text
        game_dict = dict(
            game_id=game_id,
            venue=optadocument.Venue.Name.text,
            referee_id=int(optadocument.MatchData.MatchOfficial.attrib['uID'][1:]),
            # Drop the timezone so dates are naive, like the other parsers.
            game_date=datetime.strptime(match_info.Date.text, '%Y%m%dT%H%M%S%z').replace(
                tzinfo=None
            ),
            attendance=int(match_info.Attendance),
            duration=int(stats['match_time']),
        )
        return {game_id: game_dict}
    def extract_teams(self) -> Dict[int, Dict[str, Any]]:
        """Return {team_id: team attributes} for both teams."""
        optadocument = self.get_doc()
        team_elms = list(optadocument.iterchildren('Team'))
        teams = {}
        for team_elm in team_elms:
            team_id = int(assertget(team_elm.attrib, 'uID')[1:])
            team = dict(
                team_id=team_id,
                team_name=team_elm.Name.text,
            )
            teams[team_id] = team
        return teams
    def extract_lineups(self) -> Dict[int, Dict[str, Any]]:
        """Return {team_id: lineup attributes incl. per-player lineup info}."""
        optadocument = self.get_doc()
        stats = {}
        for stat in optadocument.MatchData.find('Stat'):
            stats[stat.attrib['Type']] = stat.text
        lineup_elms = optadocument.MatchData.iterchildren('TeamData')
        lineups = {}
        for team_elm in lineup_elms:
            # lineup attributes
            team_id = int(team_elm.attrib['TeamRef'][1:])
            lineups[team_id] = dict(
                formation=team_elm.attrib['Formation'],
                score=int(team_elm.attrib['Score']),
                side=team_elm.attrib['Side'],
                players=dict(),
            )
            # substitutes
            subst_elms = team_elm.iterchildren('Substitution')
            subst = [subst_elm.attrib for subst_elm in subst_elms]
            # players
            player_elms = team_elm.PlayerLineUp.iterchildren('MatchPlayer')
            for player_elm in player_elms:
                player_id = int(player_elm.attrib['PlayerRef'][1:])
                # Minute the player came on: his SubOn event if any, else the
                # full match time for unused subs (yields 0 minutes) or 0 for starters.
                sub_on = int(
                    next(
                        (
                            item['Time']
                            for item in subst
                            if 'Retired' not in item and item['SubOn'] == f'p{player_id}'
                        ),
                        stats['match_time'] if player_elm.attrib['Status'] == 'Sub' else 0,
                    )
                )
                sub_off = int(
                    next(
                        (item['Time'] for item in subst if item['SubOff'] == f'p{player_id}'),
                        stats['match_time'],
                    )
                )
                minutes_played = sub_off - sub_on
                lineups[team_id]['players'][player_id] = dict(
                    starting_position_id=int(player_elm.attrib['Formation_Place']),
                    starting_position_name=player_elm.attrib['Position'],
                    jersey_number=int(player_elm.attrib['ShirtNumber']),
                    # Formation place 0 marks a substitute.
                    is_starter=int(player_elm.attrib['Formation_Place']) != 0,
                    minutes_played=minutes_played,
                )
        return lineups
    def extract_players(self) -> Dict[int, Dict[str, Any]]:
        """Return {player_id: player attributes} merged with lineup info."""
        optadocument = self.get_doc()
        lineups = self.extract_lineups()
        team_elms = list(optadocument.iterchildren('Team'))
        players = {}
        for team_elm in team_elms:
            team_id = int(team_elm.attrib['uID'][1:])
            for player_elm in team_elm.iterchildren('Player'):
                player_id = int(player_elm.attrib['uID'][1:])
                firstname = str(player_elm.find('PersonName').find('First'))
                lastname = str(player_elm.find('PersonName').find('Last'))
                nickname = str(player_elm.find('PersonName').find('Known'))
                # NOTE(review): assumes every player appears in the lineups dict;
                # a missing entry raises KeyError — confirm against the feed.
                player = dict(
                    team_id=team_id,
                    player_id=player_id,
                    player_name=' '.join([firstname, lastname]),
                    firstname=firstname,
                    lastname=lastname,
                    nickname=nickname,
                    **lineups[team_id]['players'][player_id],
                )
                players[player_id] = player
        return players
class _F24XMLParser(OptaXMLParser):
    """Extract games and events from an Opta F24 (event detail) XML feed."""
    def get_doc(self) -> Type[objectify.ObjectifiedElement]:
        # F24 XML has the Games element directly under the root.
        return self.root
    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return {game_id: game attributes} for the single game in this feed."""
        optadocument = self.get_doc()
        game_elem = optadocument.find('Game')
        attr = game_elem.attrib
        game_id = int(assertget(attr, 'id'))
        game_dict = dict(
            game_id=game_id,
            competition_id=int(assertget(attr, 'competition_id')),
            season_id=int(assertget(attr, 'season_id')),
            game_day=int(assertget(attr, 'matchday')),
            game_date=datetime.strptime(assertget(attr, 'game_date'), '%Y-%m-%dT%H:%M:%S'),
            home_team_id=int(assertget(attr, 'home_team_id')),
            home_score=int(assertget(attr, 'home_score')),
            away_team_id=int(assertget(attr, 'away_team_id')),
            away_score=int(assertget(attr, 'away_score')),
        )
        return {game_id: game_dict}
    def extract_events(self) -> Dict[int, Dict[str, Any]]:
        """Return {event_id: event attributes} for every event of the game."""
        optadocument = self.get_doc()
        game_elm = optadocument.find('Game')
        attr = game_elm.attrib
        game_id = int(assertget(attr, 'id'))
        events = {}
        for event_elm in game_elm.iterchildren('Event'):
            attr = dict(event_elm.attrib)
            event_id = int(attr['id'])
            # Qualifiers are keyed by their Opta qualifier id; value may be absent.
            qualifiers = {
                int(qualifier_elm.attrib['qualifier_id']): qualifier_elm.attrib.get('value')
                for qualifier_elm in event_elm.iterchildren('Q')
            }
            start_x = float(assertget(attr, 'x'))
            start_y = float(assertget(attr, 'y'))
            end_x = _get_end_x(qualifiers)
            end_y = _get_end_y(qualifiers)
            # Events without an explicit end location end where they started.
            if end_x is None:
                end_x = start_x
            if end_y is None:
                end_y = start_y
            event = dict(
                game_id=game_id,
                event_id=event_id,
                type_id=int(assertget(attr, 'type_id')),
                period_id=int(assertget(attr, 'period_id')),
                minute=int(assertget(attr, 'min')),
                second=int(assertget(attr, 'sec')),
                timestamp=datetime.strptime(assertget(attr, 'timestamp'), '%Y-%m-%dT%H:%M:%S.%f'),
                # player_id 0 marks team-level events without a player.
                player_id=int(attr.get('player_id', 0)),
                team_id=int(assertget(attr, 'team_id')),
                outcome=bool(int(attr.get('outcome', 1))),
                start_x=start_x,
                start_y=start_y,
                end_x=end_x,
                end_y=end_y,
                assist=bool(int(attr.get('assist', 0))),
                keypass=bool(int(attr.get('keypass', 0))),
                qualifiers=qualifiers,
            )
            events[event_id] = event
        return events
class _WhoScoredParser(OptaParser):
    """Extract data from a JSON data stream scraped from WhoScored.
    Parameters
    ----------
    path : str
        Path of the data file.
    competition_id : int
        ID of the competition to which the provided data file belongs. If
        None, this information is extracted from a field 'competition_id' in
        the JSON.
    season_id : int
        ID of the season to which the provided data file belongs. If None,
        this information is extracted from a field 'season_id' in the JSON.
    game_id : int
        ID of the game to which the provided data file belongs. If None, this
        information is extracted from a field 'game_id' in the JSON.
    """
    def __init__(  # noqa: C901
        self,
        path: str,
        competition_id: Optional[int] = None,
        season_id: Optional[int] = None,
        game_id: Optional[int] = None,
        *args: Any,
        **kwargs: Any,
    ):
        with open(path, 'rt', encoding='utf-8') as fh:
            self.root = json.load(fh)
        # Placeholder mapping; a real formation->position mapping may be
        # installed elsewhere. Defaults every position to 'Unknown'.
        self.position_mapping = lambda formation, x, y: 'Unknown'
        # Each of the three ids can be given explicitly or read from the JSON;
        # assertget raises AssertionError when the field is missing.
        if competition_id is None:
            try:
                competition_id = int(assertget(self.root, 'competition_id'))
            except AssertionError:
                raise MissingDataError(
                    """Could not determine the competition id. Add it to the
                    file path or include a field 'competition_id' in the
                    JSON."""
                )
        self.competition_id = competition_id
        if season_id is None:
            try:
                season_id = int(assertget(self.root, 'season_id'))
            except AssertionError:
                raise MissingDataError(
                    """Could not determine the season id. Add it to the file
                    path or include a field 'season_id' in the JSON."""
                )
        self.season_id = season_id
        if game_id is None:
            try:
                game_id = int(assertget(self.root, 'game_id'))
            except AssertionError:
                raise MissingDataError(
                    """Could not determine the game id. Add it to the file
                    path or include a field 'game_id' in the JSON."""
                )
        self.game_id = game_id
    def get_period_id(self, event: Dict[str, Any]) -> int:
        """Return the period id of an event."""
        period = assertget(event, 'period')
        period_id = int(assertget(period, 'value'))
        return period_id
    def get_period_milliseconds(self, event: Dict[str, Any]) -> int:
        """Return the time of an event in milliseconds since its period started."""
        period_minute_limits = assertget(self.root, 'periodMinuteLimits')
        period_id = self.get_period_id(event)
        if period_id == 16:  # Pre-match
            return 0
        if period_id == 14:  # Post-game
            return 0
        minute = int(assertget(event, 'minute'))
        period_minute = minute
        # Minutes are cumulative over the game; subtract the previous period's limit.
        if period_id > 1:
            period_minute = minute - period_minute_limits[str(period_id - 1)]
        period_second = period_minute * 60 + int(event.get('second', 0))
        return period_second * 1000
    def extract_games(self) -> Dict[int, Dict[str, Any]]:
        """Return {game_id: game attributes} for the scraped game."""
        team_home = assertget(self.root, 'home')
        team_away = assertget(self.root, 'away')
        game_id = self.game_id
        game_dict = dict(
            game_id=game_id,
            season_id=self.season_id,
            competition_id=self.competition_id,
            game_day=0,  # TODO: not defined in the JSON object
            game_date=datetime.strptime(
                assertget(self.root, 'startTime'), '%Y-%m-%dT%H:%M:%S'
            ),  # Dates are UTC
            home_team_id=int(assertget(team_home, 'teamId')),
            away_team_id=int(assertget(team_away, 'teamId')),
            # is_regular=None, # TODO
            # is_extra_time=None, # TODO
            # is_penalties=None, # TODO
            # is_golden_goal=None, # TODO
            # is_silver_goal=None, # TODO
            # Optional fields
            home_score=int(assertget(assertget(self.root['home'], 'scores'), 'fulltime')),
            away_score=int(assertget(assertget(self.root['away'], 'scores'), 'fulltime')),
            attendance=int(self.root.get('attendance', 0)),
            venue=str(self.root.get('venueName')),
            referee_id=int(self.root.get('referee', {}).get('officialId', 0)),
            # NOTE(review): int(None) raises TypeError if 'expandedMaxMinute'
            # is missing — confirm the field is always present.
            duration=int(self.root.get('expandedMaxMinute')),
        )
        return {game_id: game_dict}
    def extract_players(self) -> Dict[int, Dict[str, Any]]:
        """Return {player_id: player attributes} for both squads."""
        player_gamestats = self.extract_playergamestats()
        game_id = self.game_id
        players = {}
        for team in [self.root['home'], self.root['away']]:
            team_id = int(assertget(team, 'teamId'))
            for p in team['players']:
                player_id = int(assertget(p, 'playerId'))
                player = dict(
                    game_id=game_id,
                    team_id=team_id,
                    player_id=int(assertget(p, 'playerId')),
                    is_starter=bool(p.get('isFirstEleven', False)),
                    player_name=str(assertget(p, 'name')),
                    age=int(p['age']),
                    # nation_code=None,
                    # line_code=str(assertget(p, "position")),
                    # preferred_foot=None,
                    # gender=None,
                    height=float(p.get('height', float('NaN'))),
                    weight=float(p.get('weight', float('NaN'))),
                    minutes_played=player_gamestats[player_id]['minutes_played'],
                    jersey_number=player_gamestats[player_id]['jersey_number'],
                    starting_position_id=0,  # TODO
                    starting_position_name=player_gamestats[player_id]['position_code'],
                )
                # Strip accents from the player's name.
                for f in ['player_name']:
                    if player[f]:
                        player[f] = unidecode.unidecode(player[f])
                players[player_id] = player
        return players
    def extract_substitutions(self) -> Dict[int, Dict[str, Any]]:
        """Return {player_in_id: substitution attributes} (event type 19 = player on)."""
        game_id = self.game_id
        subs = {}
        subonevents = [e for e in self.root['events'] if e['type'].get('value') == 19]
        for e in subonevents:
            sub_id = int(assertget(e, 'playerId'))
            sub = dict(
                game_id=game_id,
                team_id=int(assertget(e, 'teamId')),
                period_id=int(assertget(assertget(e, 'period'), 'value')),
                period_milliseconds=self.get_period_milliseconds(e),
                player_in_id=int(assertget(e, 'playerId')),
                player_out_id=int(assertget(e, 'relatedPlayerId')),
            )
            subs[sub_id] = sub
        return subs
    def extract_positions(self) -> Dict[int, Dict[str, Any]]:  # noqa: C901
        """Return per-team player position records derived from formations."""
        game_id = self.game_id
        positions = {}
        for t in [self.root['home'], self.root['away']]:
            team_id = int(assertget(t, 'teamId'))
            for f in assertget(t, 'formations'):
                fpositions = assertget(f, 'formationPositions')
                playersIds = assertget(f, 'playerIds')
                formation = assertget(f, 'formationName')
                period_end_minutes = assertget(self.root, 'periodEndMinutes')
                period_minute_limits = assertget(self.root, 'periodMinuteLimits')
                start_minute = int(assertget(f, 'startMinuteExpanded'))
                end_minute = int(assertget(f, 'endMinuteExpanded'))
                # Find the period in which this formation starts.
                for period_id in sorted(period_end_minutes.keys()):
                    if period_end_minutes[period_id] > start_minute:
                        break
                period_id = int(period_id)
                period_minute = start_minute
                if period_id > 1:
                    period_minute = start_minute - period_minute_limits[str(period_id - 1)]
                for i, p in enumerate(fpositions):
                    x = float(assertget(p, 'vertical'))
                    y = float(assertget(p, 'horizontal'))
                    try:
                        position_code = self.position_mapping(formation, x, y)
                    except KeyError:
                        position_code = 'Unknown'
                    pos = dict(
                        game_id=game_id,
                        team_id=team_id,
                        period_id=period_id,
                        period_milliseconds=(period_minute * 60 * 1000),
                        start_milliseconds=(start_minute * 60 * 1000),
                        end_milliseconds=(end_minute * 60 * 1000),
                        formation_scheme=formation,
                        player_id=int(playersIds[i]),
                        player_position=position_code,
                        player_position_x=x,
                        player_position_y=y,
                    )
                    # NOTE(review): keyed by team_id, so each iteration overwrites
                    # the previous record and only the last player/formation per
                    # team survives — confirm whether this is intended.
                    positions[team_id] = pos
        return positions
    def extract_teams(self) -> Dict[int, Dict[str, Any]]:
        """Return {team_id: team attributes} for both teams."""
        teams = {}
        for t in [self.root['home'], self.root['away']]:
            team_id = int(assertget(t, 'teamId'))
            team = dict(
                team_id=team_id,
                team_name=assertget(t, 'name'),
            )
            # Strip accents so names are plain ASCII.
            for f in ['team_name']:
                if team[f]:
                    team[f] = unidecode.unidecode(team[f])
            teams[team_id] = team
        return teams
    def extract_referee(self) -> Dict[int, Dict[str, Any]]:
        """Return {referee_id: referee attributes}; a placeholder when absent."""
        if 'referee' not in self.root:
            # NOTE(review): 'Unkown' is misspelled but is a runtime value that
            # downstream consumers may match on — fix with care.
            return {
                0: dict(referee_id=0, first_name='Unkown', last_name='Unkown', short_name='Unkown')
            }
        r = self.root['referee']
        referee_id = int(assertget(r, 'officialId'))
        referee = dict(
            referee_id=referee_id,
            first_name=r.get('firstName'),
            last_name=r.get('lastName'),
            short_name=r.get('name'),
        )
        for f in ['first_name', 'last_name', 'short_name']:
            if referee[f]:
                referee[f] = unidecode.unidecode(referee[f])
        return {referee_id: referee}
    def extract_teamgamestats(self) -> List[Dict[str, Any]]:
        """Return a list with one aggregated stats dict per team."""
        game_id = self.game_id
        teams_gamestats = []
        teams = [self.root['home'], self.root['away']]
        for team in teams:
            statsdict = {}
            # Each stat is a {minute: value} dict; sum over the whole game.
            for name in team['stats']:
                if isinstance(team['stats'][name], dict):
                    statsdict[camel_to_snake(name)] = sum(team['stats'][name].values())
            scores = assertget(team, 'scores')
            team_gamestats = dict(
                game_id=game_id,
                team_id=int(assertget(team, 'teamId')),
                side=assertget(team, 'field'),
                score=assertget(scores, 'fulltime'),
                shootout_score=scores.get('penalty', 0),
                **{k: statsdict[k] for k in statsdict if not k.endswith('Success')},
            )
            teams_gamestats.append(team_gamestats)
        return teams_gamestats
    def extract_playergamestats(self) -> Dict[int, Dict[str, Any]]:  # noqa: C901
        """Return {player_id: per-game stats incl. derived minutes played}."""
        game_id = self.game_id
        players_gamestats = {}
        for team in [self.root['home'], self.root['away']]:
            team_id = int(assertget(team, 'teamId'))
            for player in team['players']:
                statsdict = {
                    camel_to_snake(name): sum(stat.values())
                    for name, stat in player['stats'].items()
                }
                stats = [k for k in statsdict if not k.endswith('Success')]
                player_id = int(assertget(player, 'playerId'))
                p = dict(
                    game_id=game_id,
                    team_id=team_id,
                    player_id=player_id,
                    is_starter=bool(player.get('isFirstEleven', False)),
                    position_code=player.get('position', None),
                    # optional fields
                    jersey_number=int(player.get('shirtNo', 0)),
                    mvp=bool(player.get('isManOfTheMatch', False)),
                    **{k: statsdict[k] for k in stats},
                )
                if 'subbedInExpandedMinute' in player:
                    p['minute_start'] = player['subbedInExpandedMinute']
                if 'subbedOutExpandedMinute' in player:
                    p['minute_end'] = player['subbedOutExpandedMinute']
                # Did not play
                p['minutes_played'] = 0
                # Played the full game
                if p['is_starter'] and 'minute_end' not in p:
                    p['minute_start'] = 0
                    p['minute_end'] = self.root['expandedMaxMinute']
                    p['minutes_played'] = self.root['expandedMaxMinute']
                # Started but substituted out
                elif p['is_starter'] and 'minute_end' in p:
                    p['minute_start'] = 0
                    p['minutes_played'] = p['minute_end']
                # Substituted in and played the remainder of the game
                elif 'minute_start' in p and 'minute_end' not in p:
                    p['minute_end'] = self.root['expandedMaxMinute']
                    p['minutes_played'] = self.root['expandedMaxMinute'] - p['minute_start']
                # Substituted in and out
                elif 'minute_start' in p and 'minute_end' in p:
                    p['minutes_played'] = p['minute_end'] - p['minute_start']
                players_gamestats[player_id] = p
        return players_gamestats
    def extract_events(self) -> Dict[int, Dict[str, Any]]:
        """Return {event_id: event attributes} for every event of the game."""
        events = {}
        game_id = self.game_id
        time_start_str = str(assertget(self.root, 'startTime'))
        time_start = datetime.strptime(time_start_str, '%Y-%m-%dT%H:%M:%S')
        for attr in self.root['events']:
            qualifiers = {}
            # Qualifiers without a value become boolean flags (True).
            qualifiers = {
                int(q['type']['value']): q.get('value', True) for q in attr.get('qualifiers', [])
            }
            start_x = float(assertget(attr, 'x'))
            start_y = float(assertget(attr, 'y'))
            end_x = _get_end_x(qualifiers)
            end_y = _get_end_y(qualifiers)
            # Events without an explicit end location end where they started.
            if end_x is None:
                end_x = start_x
            if end_y is None:
                end_y = start_y
            eventtype = attr.get('type', {})
            period = attr.get('period', {})
            outcome = attr.get('outcomeType', {'value': 1})
            # The event id key differs between scrape versions.
            eventIdKey = 'eventId'
            if 'id' in attr:
                eventIdKey = 'id'
            minute = int(assertget(attr, 'expandedMinute'))
            second = int(attr.get('second', 0))
            event_id = int(assertget(attr, eventIdKey))
            event = dict(
                game_id=game_id,
                event_id=event_id,
                type_id=int(assertget(eventtype, 'value')),
                period_id=int(assertget(period, 'value')),
                minute=minute,
                second=second,
                # Approximate wall-clock timestamp from kickoff plus game clock.
                timestamp=(time_start + timedelta(seconds=(minute * 60 + second))),
                player_id=int(attr.get('playerId', 0)),
                team_id=int(assertget(attr, 'teamId')),
                outcome=bool(int(outcome.get('value', 1))),
                start_x=start_x,
                start_y=start_y,
                end_x=end_x,
                end_y=end_y,
                assist=bool(int(attr.get('assist', 0))),
                keypass=bool(int(attr.get('keypass', 0))),
                qualifiers=qualifiers,
            )
            events[event_id] = event
        return events
# Registries mapping feed type identifiers to their parser classes; used to
# dispatch on the kind of data file being loaded.
_jsonparsers = {'f1': _F1JSONParser, 'f9': _F9JSONParser, 'f24': _F24JSONParser}
_xmlparsers = {'f7': _F7XMLParser, 'f24': _F24XMLParser}
_whoscoredparsers = {'whoscored': _WhoScoredParser}
def assertget(dictionary: Dict[str, Any], key: str) -> Any:
    """Return ``dictionary[key]``, raising AssertionError when absent or None.

    Parameters
    ----------
    dictionary : Dict[str, Any]
        Mapping to look the key up in.
    key : str
        Key that must be present with a non-None value.

    Returns
    -------
    Any
        The value stored under ``key``.

    Raises
    ------
    AssertionError
        If ``key`` is missing or maps to None. AssertionError (rather than
        KeyError) is raised because callers catch that type. The check uses
        an explicit ``raise`` instead of an ``assert`` statement so it is
        not stripped when Python runs with the -O flag.
    """
    value = dictionary.get(key)
    if value is None:
        raise AssertionError('KeyError: ' + key + ' not found in ' + str(dictionary))
    return value
def camel_to_snake(name: str) -> str:
    """Convert a camelCase (or PascalCase) identifier to snake_case."""
    result = name
    # Two passes: first split an "XWord" boundary (uppercase run followed by a
    # capitalized word), then split lowercase/digit-to-uppercase transitions.
    for pattern in ('(.)([A-Z][a-z]+)', '([a-z0-9])([A-Z])'):
        result = re.sub(pattern, r'\1_\2', result)
    return result.lower()
def _get_end_x(qualifiers: Dict[int, Any]) -> Optional[float]:
try:
# pass
if 140 in qualifiers:
return float(qualifiers[140])
# blocked shot
if 146 in qualifiers:
return float(qualifiers[146])
# passed the goal line
if 102 in qualifiers:
return float(100)
return None
except ValueError:
return None
def _get_end_y(qualifiers: Dict[int, Any]) -> Optional[float]:
try:
# pass
if 141 in qualifiers:
return float(qualifiers[141])
# blocked shot
if 147 in qualifiers:
return float(qualifiers[147])
# passed the goal line
if 102 in qualifiers:
return float(qualifiers[102])
return None
except ValueError:
return None
# Mapping from Opta event type ids to human-readable type names, used to
# attach a 'type_name' column when converting events to SPADL actions.
_eventtypesdf = pd.DataFrame(
    [
        (1, 'pass'),
        (2, 'offside pass'),
        (3, 'take on'),
        (4, 'foul'),
        (5, 'out'),
        (6, 'corner awarded'),
        (7, 'tackle'),
        (8, 'interception'),
        (9, 'turnover'),
        (10, 'save'),
        (11, 'claim'),
        (12, 'clearance'),
        (13, 'miss'),
        (14, 'post'),
        (15, 'attempt saved'),
        (16, 'goal'),
        (17, 'card'),
        (18, 'player off'),
        (19, 'player on'),
        (20, 'player retired'),
        (21, 'player returns'),
        (22, 'player becomes goalkeeper'),
        (23, 'goalkeeper becomes player'),
        (24, 'condition change'),
        (25, 'official change'),
        (26, 'unknown26'),
        (27, 'start delay'),
        (28, 'end delay'),
        (29, 'unknown29'),
        (30, 'end'),
        (31, 'unknown31'),
        (32, 'start'),
        (33, 'unknown33'),
        (34, 'team set up'),
        (35, 'player changed position'),
        (36, 'player changed jersey number'),
        (37, 'collection end'),
        (38, 'temp_goal'),
        (39, 'temp_attempt'),
        (40, 'formation change'),
        (41, 'punch'),
        (42, 'good skill'),
        (43, 'deleted event'),
        (44, 'aerial'),
        (45, 'challenge'),
        (46, 'unknown46'),
        (47, 'rescinded card'),
        # NOTE(review): id 48 duplicates the name of id 46 — likely a typo for
        # 'unknown48'; confirm before changing, the name is runtime data.
        (48, 'unknown46'),
        (49, 'ball recovery'),
        (50, 'dispossessed'),
        (51, 'error'),
        (52, 'keeper pick-up'),
        (53, 'cross not claimed'),
        (54, 'smother'),
        (55, 'offside provoked'),
        (56, 'shield ball opp'),
        (57, 'foul throw in'),
        (58, 'penalty faced'),
        (59, 'keeper sweeper'),
        (60, 'chance missed'),
        (61, 'ball touch'),
        (62, 'unknown62'),
        (63, 'temp_save'),
        (64, 'resume'),
        (65, 'contentious referee decision'),
        (66, 'possession data'),
        (67, '50/50'),
        (68, 'referee drop ball'),
        (69, 'failed to block'),
        (70, 'injury time announcement'),
        (71, 'coach setup'),
        (72, 'caught offside'),
        (73, 'other ball contact'),
        (74, 'blocked pass'),
        (75, 'delayed start'),
        (76, 'early end'),
        (77, 'player off pitch'),
    ],
    columns=['type_id', 'type_name'],
)
def convert_to_actions(events: pd.DataFrame, home_team_id: int) -> pd.DataFrame:
    """
    Convert Opta events to SPADL actions.
    Parameters
    ----------
    events : pd.DataFrame
        DataFrame containing Opta events from a single game.
    home_team_id : int
        ID of the home team in the corresponding game.
    Returns
    -------
    actions : pd.DataFrame
        DataFrame with corresponding SPADL actions.
    """
    actions = pd.DataFrame()
    actions['game_id'] = events.game_id
    actions['original_event_id'] = events.event_id.astype(object)
    actions['period_id'] = events.period_id
    # Game-clock minutes are cumulative; subtract the nominal start time of
    # each period (45' per half, 15' per extra-time period) to get seconds
    # elapsed within the period.
    actions['time_seconds'] = (
        60 * events.minute
        + events.second
        - ((events.period_id > 1) * 45 * 60)
        - ((events.period_id > 2) * 45 * 60)
        - ((events.period_id > 3) * 15 * 60)
        - ((events.period_id > 4) * 15 * 60)
    )
    actions['team_id'] = events.team_id
    actions['player_id'] = events.player_id
    # Opta coordinates are percentages of the pitch; rescale to SPADL meters.
    for col in ['start_x', 'end_x']:
        actions[col] = events[col] / 100 * spadlconfig.field_length
    for col in ['start_y', 'end_y']:
        actions[col] = events[col] / 100 * spadlconfig.field_width
    actions['type_id'] = events[['type_name', 'outcome', 'qualifiers']].apply(_get_type_id, axis=1)
    actions['result_id'] = events[['type_name', 'outcome', 'qualifiers']].apply(
        _get_result_id, axis=1
    )
    actions['bodypart_id'] = events.qualifiers.apply(_get_bodypart_id)
    # Drop non-actions and order chronologically before the fix-up passes.
    actions = (
        actions[actions.type_id != spadlconfig.actiontypes.index('non_action')]
        .sort_values(['game_id', 'period_id', 'time_seconds'])
        .reset_index(drop=True)
    )
    actions = _fix_owngoals(actions)
    actions = _fix_direction_of_play(actions, home_team_id)
    actions = _fix_clearances(actions)
    actions['action_id'] = range(len(actions))
    actions = _add_dribbles(actions)
    # Cast every id column (except the original event id) to int.
    for col in [c for c in actions.columns.values if c != 'original_event_id']:
        if '_id' in col:
            actions[col] = actions[col].astype(int)
    return actions
def _get_bodypart_id(qualifiers: Dict[int, Any]) -> int:
    """Map Opta qualifiers to a SPADL body-part index.

    Qualifier 15 marks a header and takes precedence over 21 ('other' body
    part); everything else defaults to 'foot'.
    """
    bodypart = 'head' if 15 in qualifiers else ('other' if 21 in qualifiers else 'foot')
    return spadlconfig.bodyparts.index(bodypart)
def _get_result_id(args: Tuple[str, bool, Dict[int, Any]]) -> int:
    """Derive the SPADL result index from an event's (name, outcome, qualifiers).

    Goals are successes unless qualifier 28 marks an own goal; offside passes
    map to 'offside'; fouls, missed/saved shots and bad touches always fail;
    any other event falls back on its boolean outcome flag.
    """
    event_name, outcome, qualifiers = args
    if event_name == 'goal':
        # Qualifier 28 flags an own goal (coordinates are mirrored later).
        result = 'owngoal' if 28 in qualifiers else 'success'
    elif event_name == 'offside pass':
        result = 'offside'
    elif event_name in ('foul', 'attempt saved', 'miss', 'post', 'ball touch'):
        result = 'fail'
    elif outcome:
        result = 'success'
    else:
        result = 'fail'
    return spadlconfig.results.index(result)
def _get_type_id(args: Tuple[str, bool, Dict[int, Any]]) -> int:  # noqa: C901
    """Derive the SPADL action type index from an event's (name, outcome,
    qualifiers).

    Passes and shots are refined further via their qualifiers; several event
    names map one-to-one onto an action type; anything unrecognized becomes
    'non_action'.
    """
    eventname, outcome, q = args
    # Event names that translate directly into a single action type.
    direct = {
        'take on': 'take_on',
        'tackle': 'tackle',
        'save': 'keeper_save',
        'claim': 'keeper_claim',
        'punch': 'keeper_punch',
        'keeper pick-up': 'keeper_pick_up',
        'clearance': 'clearance',
    }
    if eventname in ('pass', 'offside pass'):
        # Refine the pass type from its qualifiers, most specific first.
        cross = 2 in q
        freekick = 5 in q
        corner = 6 in q
        if 107 in q:
            a = 'throw_in'
        elif freekick and cross:
            a = 'freekick_crossed'
        elif freekick:
            a = 'freekick_short'
        elif corner and cross:
            a = 'corner_crossed'
        elif corner:
            a = 'corner_short'
        elif cross:
            a = 'cross'
        elif 124 in q:
            a = 'goalkick'
        else:
            a = 'pass'
    elif eventname in ('interception', 'blocked pass'):
        a = 'interception'
    elif eventname in ('miss', 'post', 'attempt saved', 'goal'):
        # Qualifier 9 marks a penalty, 26 a shot from a free kick.
        if 9 in q:
            a = 'shot_penalty'
        elif 26 in q:
            a = 'shot_freekick'
        else:
            a = 'shot'
    elif eventname == 'foul' and outcome is False:
        a = 'foul'
    elif eventname == 'ball touch' and outcome is False:
        a = 'bad_touch'
    elif eventname in direct:
        a = direct[eventname]
    else:
        a = 'non_action'
    return spadlconfig.actiontypes.index(a)
def _fix_owngoals(actions: pd.DataFrame) -> pd.DataFrame:
    """Relabel own-goal shots as bad touches and mirror their end location.

    Own goals are recorded from the scoring player's perspective, so the end
    coordinates must be flipped to the opposite goal, and the action is no
    longer a genuine shot.
    """
    mask = (actions.result_id == spadlconfig.results.index('owngoal')) & (
        actions.type_id == spadlconfig.actiontypes.index('shot')
    )
    # Mirror the end point across the pitch center.
    actions.loc[mask, 'end_x'] = spadlconfig.field_length - actions[mask].end_x.values
    actions.loc[mask, 'end_y'] = spadlconfig.field_width - actions[mask].end_y.values
    actions.loc[mask, 'type_id'] = spadlconfig.actiontypes.index('bad_touch')
    return actions
| mit |
rabipanda/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 4 | 262821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Shorthands used throughout the tests below: NAN for metrics that are
# undefined on empty input, and a short alias for the contrib metrics module.
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
  """Enqueue `values` onto `queue` as a constant tensor.

  If `shape` is falsy, the values are enqueued as a single row vector of
  shape `(1, len(values))`.  The tensor dtype is taken from the queue's
  first component dtype.

  Args:
    sess: Session used to run the enqueue op.
    queue: A queue (e.g. `FIFOQueue`) to enqueue onto.
    values: The values to enqueue.
    shape: Optional explicit shape for the enqueued tensor.
  """
  if not shape:
    shape = (1, len(values))
  tensor = constant_op.constant(values, dtype=queue.dtypes[0], shape=shape)
  sess.run(queue.enqueue(tensor))
def _binary_2d_label_to_sparse_value(labels):
  """Convert dense 2D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensorValue` whose values are indices along the last dimension of
    `labels`.
  """
  indices = []
  values = []
  for batch, row in enumerate(labels):
    # `position` counts how many 1s have been seen in this row so far; it is
    # the sparse column index, while `class_id` is the stored value.
    position = 0
    for class_id, indicator in enumerate(row):
      if indicator == 1:
        indices.append([batch, position])
        values.append(class_id)
        position += 1
      else:
        assert indicator == 0
  shape = [len(labels), len(labels[0])]
  return sparse_tensor.SparseTensorValue(
      np.array(indices, np.int64), np.array(values, np.int64),
      np.array(shape, np.int64))
def _binary_2d_label_to_sparse(labels):
  """Convert dense 2D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensor` whose values are indices along the last dimension of
    `labels`.
  """
  sparse_value = _binary_2d_label_to_sparse_value(labels)
  return sparse_tensor.SparseTensor.from_value(sparse_value)
def _binary_3d_label_to_sparse_value(labels):
  """Convert dense 3D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensorValue` whose values are indices along the last dimension of
    `labels`.
  """
  indices = []
  values = []
  for batch, matrix in enumerate(labels):
    for row_idx, row in enumerate(matrix):
      # `position` is the index of the next 1 within this (batch, row) pair;
      # `class_id` (the column of the 1) becomes the stored value.
      position = 0
      for class_id, indicator in enumerate(row):
        if indicator == 1:
          indices.append([batch, row_idx, position])
          values.append(class_id)
          position += 1
        else:
          assert indicator == 0
  shape = [len(labels), len(labels[0]), len(labels[0][0])]
  return sparse_tensor.SparseTensorValue(
      np.array(indices, np.int64), np.array(values, np.int64),
      np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
  """Convert dense 3D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensor` whose values are indices along the last dimension of
    `labels`.
  """
  sparse_value = _binary_3d_label_to_sparse_value(labels)
  return sparse_tensor.SparseTensor.from_value(sparse_value)
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
  """Assert that the graph's metric variables are exactly `expected`.

  Checks both the local-variables collection and the `METRIC_VARIABLES`
  collection against the expected set of variable names.

  Args:
    test_case: `test.TestCase` instance used for assertions.
    expected: Iterable of expected variable names, e.g. ('mean/count:0',).
  """
  # Use assertEqual: assertEquals is a deprecated alias (removed in newer
  # Python unittest versions).
  test_case.assertEqual(
      set(expected), set(v.name for v in variables.local_variables()))
  test_case.assertEqual(
      set(expected),
      set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
  """Tests for `metrics.streaming_mean` (scalar streaming mean)."""
  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_mean(array_ops.ones([4, 3]))
    _assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean(
        array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean(
        array_ops.ones([4, 3]), updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testBasic(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Mean of all 8 enqueued scalars: (0+1-4.2+9.1+6.5+0-3.2+4.0)/8 = 1.65.
      self.assertAlmostEqual(1.65, sess.run(mean), 5)
  def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values)
      sess.run(variables.local_variables_initializer())
      # Each update op run returns the running mean so far.
      self.assertAlmostEqual(0.5, sess.run(update_op), 5)
      self.assertAlmostEqual(1.475, sess.run(update_op), 5)
      self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.65, sess.run(update_op), 5)
      self.assertAlmostEqual(1.65, sess.run(mean), 5)
  def test1dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [1])
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values, weights)
      variables.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      # A scalar-per-batch weight keeps/drops whole rows: rows 0 and 3 kept.
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
  def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values, weights)
      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
  def test2dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values, weights)
      variables.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      # Element-wise weights select individual entries: 0, 1, -4.2, 0.
      self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
  def test2dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(2,))
      _enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean(values, weights)
      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
  """Tests for `metrics.streaming_mean_tensor` (element-wise streaming mean)."""
  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
    _assert_metric_variables(self,
                             ('mean/total_tensor:0', 'mean/count_tensor:0'))
  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_tensor(
        array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_tensor(
        array_ops.ones([4, 3]), updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testBasic(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Per-element mean over the 4 batches of shape (1, 2).
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
  def testMultiDimensional(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
          shape=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
          shape=(2, 2, 2))
      values = values_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values)
      sess.run(variables.local_variables_initializer())
      for _ in range(2):
        sess.run(update_op)
      self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
  def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values)
      sess.run(variables.local_variables_initializer())
      # Each update op run returns the running per-element mean so far.
      self.assertAllClose([[0, 1]], sess.run(update_op), 5)
      self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
      self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
  def testWeighted1d(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values, weights)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Only batches 0 and 2 contribute: mean of [0, 1] and [6.5, 0].
      self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
  def testWeighted2d_1(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values, weights)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
  def testWeighted2d_2(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()
      mean, update_op = metrics.streaming_mean_tensor(values, weights)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # First column is never weighted; its mean stays at the initial 0.
      self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
  """Tests for `metrics.streaming_accuracy`."""
  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        name='my_accuracy')
    _assert_metric_variables(self,
                             ('my_accuracy/count:0', 'my_accuracy/total:0'))
  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    predictions = array_ops.ones((10, 3))
    labels = array_ops.ones((10, 4))
    with self.assertRaises(ValueError):
      metrics.streaming_accuracy(predictions, labels)
  def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
    predictions = array_ops.ones((10, 3))
    labels = array_ops.ones((10, 3))
    weights = array_ops.ones((9, 3))
    with self.assertRaises(ValueError):
      metrics.streaming_accuracy(predictions, labels, weights)
  def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
    accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency: evaluating the value tensor must not mutate
      # the accumulated state.
      initial_accuracy = accuracy.eval()
      for _ in range(10):
        self.assertEqual(initial_accuracy, accuracy.eval())
  def testMultipleUpdates(self):
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()
      accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
      sess.run(variables.local_variables_initializer())
      for _ in xrange(3):
        sess.run(update_op)
      # First two pairs match, last two do not: accuracy is 2/4.
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, accuracy.eval())
  def testEffectivelyEquivalentSizes(self):
    # Shapes (40, 1) and (40,) are squeezable to the same effective shape.
    predictions = array_ops.ones((40, 1))
    labels = array_ops.ones((40,))
    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.0, update_op.eval())
      self.assertEqual(1.0, accuracy.eval())
  # NOTE(review): "Staic" in the method name is a typo for "Static"; kept
  # as-is since renaming would change the public test method name.
  def testEffectivelyEquivalentSizesWithStaicShapedWeight(self):
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape 3, 1
    weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
                                    1)  # shape 3, 1
    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
                                                       weights)
      sess.run(variables.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(), .95)
      self.assertGreater(accuracy.eval(), .95)
  def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape 3, 1
    weights = [[100], [1], [1]]  # shape 3, 1
    weights_placeholder = array_ops.placeholder(
        dtype=dtypes_lib.int32, name='weights')
    feed_dict = {weights_placeholder: weights}
    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
                                                       weights_placeholder)
      sess.run(variables.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
      self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
  def testMultipleUpdatesWithWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()
      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.int64, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      weights = weights_queue.dequeue()
      accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
                                                       weights)
      sess.run(variables.local_variables_initializer())
      for _ in xrange(3):
        sess.run(update_op)
      # Only the two correct pairs carry non-zero weight, so accuracy is 1.0.
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
  """Tests for `metrics.streaming_true_positives`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
    _assert_metric_variables(self, ('true_positives/count:0',))
  def testUnweighted(self):
    # Exercise all combinations of trailing-dim expansion and input dtype.
    for expand_predictions in [True, False]:
      for expand_labels in [True, False]:
        for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
          predictions = math_ops.cast(
              constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_predictions:
            predictions = array_ops.expand_dims(predictions, 2)
          labels = math_ops.cast(
              constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_labels:
            labels = array_ops.expand_dims(labels, 2)
          tp, tp_update_op = metrics.streaming_true_positives(
              predictions, labels)
          with self.test_session() as sess:
            sess.run(variables.local_variables_initializer())
            self.assertEqual(0, tp.eval())
            # Exactly one position has prediction == label == 1.
            self.assertEqual(1, tp_update_op.eval())
            self.assertEqual(1, tp.eval())
  def testWeighted(self):
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
          dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
          dtype=dtype)
      tp, tp_update_op = metrics.streaming_true_positives(
          predictions, labels, weights=37.0)
      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertEqual(0, tp.eval())
        # The single true positive counts with scalar weight 37.
        self.assertEqual(37.0, tp_update_op.eval())
        self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
  """Tests for `metrics.streaming_false_negatives`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
    _assert_metric_variables(self, ('false_negatives/count:0',))
  def testUnweighted(self):
    # Exercise all combinations of trailing-dim expansion and input dtype.
    for expand_predictions in [True, False]:
      for expand_labels in [True, False]:
        for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
          predictions = math_ops.cast(
              constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_predictions:
            predictions = array_ops.expand_dims(predictions, 2)
          labels = math_ops.cast(
              constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_labels:
            labels = array_ops.expand_dims(labels, 2)
          fn, fn_update_op = metrics.streaming_false_negatives(
              predictions, labels)
          with self.test_session() as sess:
            sess.run(variables.local_variables_initializer())
            self.assertEqual(0, fn.eval())
            # Two positions have label 1 but prediction 0.
            self.assertEqual(2, fn_update_op.eval())
            self.assertEqual(2, fn.eval())
  def testWeighted(self):
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
          dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
          dtype=dtype)
      fn, fn_update_op = metrics.streaming_false_negatives(
          predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertEqual(0, fn.eval())
        # Per-row weights: one FN in row 0 (3.0) + one in row 1 (5.0) = 8.0.
        self.assertEqual(8.0, fn_update_op.eval())
        self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
  """Tests for `metrics.streaming_false_positives`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
    _assert_metric_variables(self, ('false_positives/count:0',))
  def testUnweighted(self):
    # Exercise all combinations of trailing-dim expansion and input dtype.
    for expand_predictions in [True, False]:
      for expand_labels in [True, False]:
        for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
          predictions = math_ops.cast(
              constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_predictions:
            predictions = array_ops.expand_dims(predictions, 2)
          labels = math_ops.cast(
              constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_labels:
            labels = array_ops.expand_dims(labels, 2)
          fp, fp_update_op = metrics.streaming_false_positives(
              predictions, labels)
          with self.test_session() as sess:
            sess.run(variables.local_variables_initializer())
            self.assertEqual(0, fp.eval())
            # Four positions have prediction 1 but label 0.
            self.assertEqual(4, fp_update_op.eval())
            self.assertEqual(4, fp.eval())
  def testWeighted(self):
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
          dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
          dtype=dtype)
      fp, fp_update_op = metrics.streaming_false_positives(
          predictions,
          labels,
          weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
                                                                   29.0, 31.0)))
      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertEqual(0, fp.eval())
        # Element-wise weights at the 4 FP positions: 1 + 11 + 13 + 17 = 42.
        self.assertEqual(42.0, fp_update_op.eval())
        self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
  """Tests for `metrics.streaming_true_negatives`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
    _assert_metric_variables(self, ('true_negatives/count:0',))
  def testUnweighted(self):
    # Exercise all combinations of trailing-dim expansion and input dtype.
    for expand_predictions in [True, False]:
      for expand_labels in [True, False]:
        for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
          predictions = math_ops.cast(
              constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_predictions:
            predictions = array_ops.expand_dims(predictions, 2)
          labels = math_ops.cast(
              constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
              dtype=dtype)
          if expand_labels:
            labels = array_ops.expand_dims(labels, 2)
          tn, tn_update_op = metrics.streaming_true_negatives(
              predictions, labels)
          with self.test_session() as sess:
            sess.run(variables.local_variables_initializer())
            self.assertEqual(0, tn.eval())
            # Five positions have prediction == label == 0.
            self.assertEqual(5, tn_update_op.eval())
            self.assertEqual(5, tn.eval())
  def testWeighted(self):
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
          dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
          dtype=dtype)
      tn, tn_update_op = metrics.streaming_true_negatives(
          predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertEqual(0, tn.eval())
        # Column weights broadcast across rows; TN positions sum to 15.
        self.assertEqual(15.0, tn_update_op.eval())
        self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
  """Tests for `metrics.streaming_true_positives_at_thresholds`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_true_positives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
    _assert_metric_variables(self, ('true_positives:0',))
  def testUnweighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
        predictions, labels, thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0, 0, 0), tp.eval())
      # One TP count per threshold; higher thresholds admit fewer positives.
      self.assertAllEqual((3, 1, 0), tp_update_op.eval())
      self.assertAllEqual((3, 1, 0), tp.eval())
  def testWeighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
        predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
      # Unweighted counts (3, 1, 0) scaled by the scalar weight 37.
      self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
      self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
  """Tests for `metrics.streaming_false_negatives_at_thresholds`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
            0.15,
            0.5,
            0.85,
        ))
    _assert_metric_variables(self, ('false_negatives:0',))
  def testUnweighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
        predictions, labels, thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0, 0, 0), fn.eval())
      # One FN count per threshold; higher thresholds miss more positives.
      self.assertAllEqual((0, 2, 3), fn_update_op.eval())
      self.assertAllEqual((0, 2, 3), fn.eval())
  def testWeighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
        predictions,
        labels,
        weights=((3.0,), (5.0,), (7.0,)),
        thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
      self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
      self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
  """Tests for `metrics.streaming_false_positives_at_thresholds`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_false_positives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
    _assert_metric_variables(self, ('false_positives:0',))
  def testUnweighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
        predictions, labels, thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0, 0, 0), fp.eval())
      # One FP count per threshold; higher thresholds admit fewer positives.
      self.assertAllEqual((7, 4, 2), fp_update_op.eval())
      self.assertAllEqual((7, 4, 2), fp.eval())
  def testWeighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
        predictions,
        labels,
        weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
                                                                 29.0, 31.0)),
        thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
      self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
      self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
  """Tests for `metrics.streaming_true_negatives_at_thresholds`."""
  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    metrics.streaming_true_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
    _assert_metric_variables(self, ('true_negatives:0',))
  def testUnweighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
        predictions, labels, thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0, 0, 0), tn.eval())
      # One TN count per threshold; higher thresholds reject more negatives.
      self.assertAllEqual((2, 5, 7), tn_update_op.eval())
      self.assertAllEqual((2, 5, 7), tn.eval())
  def testWeighted(self):
    predictions = constant_op.constant(
        ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
    labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
    tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
        predictions,
        labels,
        weights=((0.0, 2.0, 3.0, 5.0),),
        thresholds=(0.15, 0.5, 0.85))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
      self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
      self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
  """Tests for metrics.streaming_precision."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers its two count variables."""
    metrics.streaming_precision(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(self, ('precision/false_positives/count:0',
                                    'precision/true_positives/count:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change its value."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_precision = precision.eval()
      for _ in range(10):
        self.assertEqual(initial_precision, precision.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give precision 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs)
    labels = constant_op.constant(inputs)
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op))
      self.assertAlmostEqual(1, precision.eval())

  def testSomeCorrect(self):
    """One of the two predicted positives is a true positive: precision 0.5."""
    predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, precision.eval())

  def testWeighted1d(self):
    """Per-row weights scale both true positives and predicted positives."""
    predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=constant_op.constant([[2], [5]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      # TP weight: 2 (row 1) + 5 (row 2); each row predicts two positives.
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeighted1d_placeholders(self):
    """Same as testWeighted1d, but feeds inputs through placeholders."""
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=constant_op.constant([[2], [5]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testWeighted2d(self):
    """Per-element weights scale both true positives and predicted positives."""
    predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.streaming_precision(
        predictions,
        labels,
        weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      # TP entries carry weights 3 and 4; predicted positives carry
      # weights 1, 3 (row 1) and 4, 2 (row 2).
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeighted2d_placeholders(self):
    """Same as testWeighted2d, but feeds inputs through placeholders."""
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions,
        labels,
        weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testAllIncorrect(self):
    """Complemented labels mean every predicted positive is wrong."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs)
    labels = constant_op.constant(1 - inputs)
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0, precision.eval())

  def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
    """With no predicted positives at all, precision is defined to be 0."""
    predictions = constant_op.constant([0, 0, 0, 0])
    labels = constant_op.constant([0, 0, 0, 0])
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
  """Tests for metrics.streaming_recall."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates its false-negative and true-positive counters."""
    metrics.streaming_recall(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self,
        ('recall/false_negatives/count:0', 'recall/true_positives/count:0'))

  def testMetricsCollection(self):
    """The value tensor lands in the requested metrics collection."""
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_recall(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    """The update op lands in the requested updates collection."""
    collection = '__updates__'
    _, update_op = metrics.streaming_recall(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update_op])

  def testValueTensorIsIdempotent(self):
    """Re-reading the value tensor without updates returns the same value."""
    preds = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    truth = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    recall, update_op = metrics.streaming_recall(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())

      # Accumulate over several batches.
      for _ in range(10):
        session.run(update_op)

      # Reading the value must not mutate the accumulated state.
      first_value = recall.eval()
      for _ in range(10):
        self.assertEqual(first_value, recall.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give recall 1."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(np_inputs)
    recall, update_op = metrics.streaming_recall(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(1, recall.eval())

  def testSomeCorrect(self):
    """Half of the positives are recovered, so recall is 0.5."""
    preds = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    truth = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    recall, update_op = metrics.streaming_recall(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, recall.eval())

  def testWeighted1d(self):
    """Per-row weights scale true positives and total positives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    row_weights = constant_op.constant([[2], [5]])
    recall, update_op = metrics.streaming_recall(
        preds, truth, weights=row_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # TP weight: 2 (row 1) + 5 (row 2); each row has two positive labels.
      expected_recall = (2.0 + 5.0) / ((2.0 + 2.0) + (5.0 + 5.0))
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())

  def testWeighted2d(self):
    """Per-element weights scale true positives and total positives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    elem_weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
    recall, update_op = metrics.streaming_recall(
        preds, truth, weights=elem_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # TP entries carry weights 3 and 1; positive labels carry
      # weights 2, 3 (row 1) and 4, 1 (row 2).
      expected_recall = (3.0 + 1.0) / ((2.0 + 3.0) + (4.0 + 1.0))
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())

  def testAllIncorrect(self):
    """Complemented labels mean no positive is recovered: recall 0."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(1 - np_inputs)
    recall, update_op = metrics.streaming_recall(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, recall.eval())

  def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
    """With no positive labels at all, recall is defined to be 0."""
    preds = array_ops.zeros((1, 4))
    truth = array_ops.zeros((1, 4))
    recall, update_op = metrics.streaming_recall(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
  """Tests for metrics.streaming_false_positive_rate."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates its false-positive and true-negative counters."""
    metrics.streaming_false_positive_rate(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(self,
                             ('false_positive_rate/false_positives/count:0',
                              'false_positive_rate/true_negatives/count:0'))

  def testMetricsCollection(self):
    """The value tensor lands in the requested metrics collection."""
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_false_positive_rate(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    """The update op lands in the requested updates collection."""
    collection = '__updates__'
    _, update_op = metrics.streaming_false_positive_rate(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update_op])

  def testValueTensorIsIdempotent(self):
    """Re-reading the value tensor without updates returns the same value."""
    preds = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    truth = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    fpr, update_op = metrics.streaming_false_positive_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())

      # Accumulate over several batches.
      for _ in range(10):
        session.run(update_op)

      # Reading the value must not mutate the accumulated state.
      first_value = fpr.eval()
      for _ in range(10):
        self.assertEqual(first_value, fpr.eval())

  def testAllCorrect(self):
    """Identical predictions and labels produce no false positives."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(np_inputs)
    fpr, update_op = metrics.streaming_false_positive_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, fpr.eval())

  def testSomeCorrect(self):
    """One of the two negatives is predicted positive: FPR 0.5."""
    preds = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    truth = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    fpr, update_op = metrics.streaming_false_positive_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, fpr.eval())

  def testWeighted1d(self):
    """Per-row weights scale false positives and total negatives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    row_weights = constant_op.constant([[2], [5]])
    fpr, update_op = metrics.streaming_false_positive_rate(
        preds, truth, weights=row_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # FP weight: 2 (row 1) + 5 (row 2); each row has two negative labels.
      expected_fpr = (2.0 + 5.0) / ((2.0 + 2.0) + (5.0 + 5.0))
      self.assertAlmostEqual(expected_fpr, update_op.eval())
      self.assertAlmostEqual(expected_fpr, fpr.eval())

  def testWeighted2d(self):
    """Per-element weights scale false positives and total negatives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    elem_weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
    fpr, update_op = metrics.streaming_false_positive_rate(
        preds, truth, weights=elem_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # FP entries carry weights 1 and 3; negative labels carry
      # weights 1, 4 (row 1) and 2, 3 (row 2).
      expected_fpr = (1.0 + 3.0) / ((1.0 + 4.0) + (2.0 + 3.0))
      self.assertAlmostEqual(expected_fpr, update_op.eval())
      self.assertAlmostEqual(expected_fpr, fpr.eval())

  def testAllIncorrect(self):
    """Complemented labels make every negative a false positive: FPR 1."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(1 - np_inputs)
    fpr, update_op = metrics.streaming_false_positive_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(1, fpr.eval())

  def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
    """With no negative labels at all, FPR is defined to be 0."""
    preds = array_ops.ones((1, 4))
    truth = array_ops.ones((1, 4))
    fpr, update_op = metrics.streaming_false_positive_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
  """Tests for metrics.streaming_false_negative_rate."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates its false-negative and true-positive counters."""
    metrics.streaming_false_negative_rate(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(self,
                             ('false_negative_rate/false_negatives/count:0',
                              'false_negative_rate/true_positives/count:0'))

  def testMetricsCollection(self):
    """The value tensor lands in the requested metrics collection."""
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_false_negative_rate(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    """The update op lands in the requested updates collection."""
    collection = '__updates__'
    _, update_op = metrics.streaming_false_negative_rate(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update_op])

  def testValueTensorIsIdempotent(self):
    """Re-reading the value tensor without updates returns the same value."""
    preds = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    truth = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    fnr, update_op = metrics.streaming_false_negative_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())

      # Accumulate over several batches.
      for _ in range(10):
        session.run(update_op)

      # Reading the value must not mutate the accumulated state.
      first_value = fnr.eval()
      for _ in range(10):
        self.assertEqual(first_value, fnr.eval())

  def testAllCorrect(self):
    """Identical predictions and labels produce no false negatives."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(np_inputs)
    fnr, update_op = metrics.streaming_false_negative_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, fnr.eval())

  def testSomeCorrect(self):
    """One of the two positives is missed: FNR 0.5."""
    preds = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
    truth = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
    fnr, update_op = metrics.streaming_false_negative_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, fnr.eval())

  def testWeighted1d(self):
    """Per-row weights scale false negatives and total positives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    row_weights = constant_op.constant([[2], [5]])
    fnr, update_op = metrics.streaming_false_negative_rate(
        preds, truth, weights=row_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # FN weight: 2 (row 1) + 5 (row 2); each row has two positive labels.
      expected_fnr = (2.0 + 5.0) / ((2.0 + 2.0) + (5.0 + 5.0))
      self.assertAlmostEqual(expected_fnr, update_op.eval())
      self.assertAlmostEqual(expected_fnr, fnr.eval())

  def testWeighted2d(self):
    """Per-element weights scale false negatives and total positives alike."""
    preds = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    truth = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    elem_weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
    fnr, update_op = metrics.streaming_false_negative_rate(
        preds, truth, weights=elem_weights)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      # FN entries carry weights 2 and 4; positive labels carry
      # weights 2, 3 (row 1) and 4, 1 (row 2).
      expected_fnr = (2.0 + 4.0) / ((2.0 + 3.0) + (1.0 + 4.0))
      self.assertAlmostEqual(expected_fnr, update_op.eval())
      self.assertAlmostEqual(expected_fnr, fnr.eval())

  def testAllIncorrect(self):
    """Complemented labels make every positive a false negative: FNR 1."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    preds = constant_op.constant(np_inputs)
    truth = constant_op.constant(1 - np_inputs)
    fnr, update_op = metrics.streaming_false_negative_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(1, fnr.eval())

  def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
    """With no positive labels at all, FNR is defined to be 0."""
    preds = array_ops.zeros((1, 4))
    truth = array_ops.zeros((1, 4))
    fnr, update_op = metrics.streaming_false_negative_rate(preds, truth)

    with self.test_session() as session:
      session.run(variables.local_variables_initializer())
      session.run(update_op)
      self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
  """Tests for metric_ops.streaming_curve_points."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers its four confusion-count variables."""
    metric_ops.streaming_curve_points(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self,
        ('curve_points/true_positives:0', 'curve_points/false_negatives:0',
         'curve_points/false_positives:0', 'curve_points/true_negatives:0'))

  def testMetricsCollection(self):
    """The points tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    points, _ = metric_ops.streaming_curve_points(
        labels=array_ops.ones((10, 1)),
        predictions=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [points])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metric_ops.streaming_curve_points(
        labels=array_ops.ones((10, 1)),
        predictions=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def _testValueTensorIsIdempotent(self, curve):
    """Checks that re-feeding the same batch leaves the points unchanged.

    Args:
      curve: 'ROC' or 'PR'; forwarded to streaming_curve_points.
    """
    predictions = constant_op.constant(
        np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
    points, update_op = metric_ops.streaming_curve_points(
        labels, predictions=predictions, curve=curve)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      sess.run(update_op)
      initial_points = points.eval()

      # A second update with identical data should not move the points.
      sess.run(update_op)
      self.assertAllClose(initial_points, points.eval())

  def testValueTensorIsIdempotentROC(self):
    self._testValueTensorIsIdempotent(curve='ROC')

  def testValueTensorIsIdempotentPR(self):
    self._testValueTensorIsIdempotent(curve='PR')

  def _testCase(self, labels, predictions, curve, expected_points):
    """Runs one update with 3 thresholds and compares to expected_points.

    Args:
      labels: nested list of ground-truth values.
      predictions: nested list of scores, same shape as labels.
      curve: 'ROC' or 'PR'.
      expected_points: list of [x, y] points, one per threshold.
    """
    with self.test_session() as sess:
      predictions_tensor = constant_op.constant(
          predictions, dtype=dtypes_lib.float32)
      labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
      points, update_op = metric_ops.streaming_curve_points(
          labels=labels_tensor,
          predictions=predictions_tensor,
          num_thresholds=3,
          curve=curve)

      sess.run(variables.local_variables_initializer())
      sess.run(update_op)

      self.assertAllClose(expected_points, points.eval())

  def testEdgeCasesROC(self):
    """Single-example corner cases for the ROC curve."""
    self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
    self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
    self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
    self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])

  def testManyValuesROC(self):
    """A mixed batch produces the expected ROC points."""
    self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
                   [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
                   [[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])

  def testEdgeCasesPR(self):
    """Single-example corner cases for the PR curve."""
    self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
    self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
    self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
    self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])

  def testManyValuesPR(self):
    """A mixed batch produces the expected PR points."""
    self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
                   [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
                   [[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
class StreamingAUCTest(test.TestCase):
  """Tests for metrics.streaming_auc over ROC and PR curves."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers its four confusion-count variables."""
    metrics.streaming_auc(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(self,
                             ('auc/true_positives:0', 'auc/false_negatives:0',
                              'auc/false_positives:0', 'auc/true_negatives:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_auc(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_auc(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the AUC tensor repeatedly does not change its value."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    auc, update_op = metrics.streaming_auc(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_auc = auc.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_auc, auc.eval(), 5)

  def testPredictionsOutOfRange(self):
    """Predictions outside [0, 1] must make the update op fail."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      _, update_op = metrics.streaming_auc(predictions, labels)
      sess.run(variables.local_variables_initializer())
      self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)

  def testAllCorrect(self):
    self.allCorrectAsExpected('ROC')

  def allCorrectAsExpected(self, curve):
    """Perfect predictions give AUC 1 for the given curve type."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)

      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))

      self.assertEqual(1, auc.eval())

  def testSomeCorrect(self):
    """A half-right batch gives ROC AUC 0.5."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, sess.run(update_op))

      self.assertAlmostEqual(0.5, auc.eval())

  def testWeighted1d(self):
    """A single shared weight leaves the unweighted AUC unchanged."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      weights = constant_op.constant([2], shape=(1, 1))
      auc, update_op = metrics.streaming_auc(
          predictions, labels, weights=weights)

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.5, sess.run(update_op), 5)

      self.assertAlmostEqual(0.5, auc.eval(), 5)

  def testWeighted2d(self):
    """Per-element weights shift the AUC away from the unweighted value."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(
          predictions, labels, weights=weights)

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.7, sess.run(update_op), 5)

      self.assertAlmostEqual(0.7, auc.eval(), 5)

  def testAUCPRSpecialCase(self):
    """PR AUC on a small, mostly-separable batch."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)

  def testAnotherAUCPRSpecialCase(self):
    """PR AUC with interleaved positives and negatives."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
          shape=(1, 7),
          dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)

  def testThirdAUCPRSpecialCase(self):
    """PR AUC where positives and negatives are nearly score-separated."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
          shape=(1, 7),
          dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)

  def testAllIncorrect(self):
    """Completely inverted predictions give AUC 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0, sess.run(update_op))

      self.assertAlmostEqual(0, auc.eval())

  def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
    """With no positive labels at all, ROC AUC is defined to be 1."""
    with self.test_session() as sess:
      predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
      labels = array_ops.zeros([4])
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 6)

      self.assertAlmostEqual(1, auc.eval(), 6)

  def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
    """All-positive labels with all-positive scores give PR AUC 1."""
    with self.test_session() as sess:
      predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
      labels = array_ops.ones([4])
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 6)

      self.assertAlmostEqual(1, auc.eval(), 6)

  def testWithMultipleUpdates(self):
    """Compares streaming AUC over many batches against the numpy AUC."""
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=num_samples)
    noise = np.random.normal(0.0, scale=0.2, size=num_samples)
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0

    def _enqueue_as_batches(x, enqueue_ops):
      # Split x into batches and stage one enqueue op per batch index so
      # that predictions/labels/weights are fed to the metric in lock step.
      x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
      x_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
        enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
      return x_queue.dequeue()

    for weights in (None, np.ones(num_samples),
                    np.random.exponential(scale=1.0, size=num_samples)):
      expected_auc = _np_auc(predictions, labels, weights)

      with self.test_session() as sess:
        enqueue_ops = [[] for i in range(num_batches)]
        tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
        tf_labels = _enqueue_as_batches(labels, enqueue_ops)
        tf_weights = (
            _enqueue_as_batches(weights, enqueue_ops)
            if weights is not None else None)

        for i in range(num_batches):
          sess.run(enqueue_ops[i])

        auc, update_op = metrics.streaming_auc(
            tf_predictions,
            tf_labels,
            curve='ROC',
            num_thresholds=500,
            weights=tf_weights)

        sess.run(variables.local_variables_initializer())
        for i in range(num_batches):
          sess.run(update_op)

        # Since this is only approximate, we can't expect a 6 digits match.
        # Although with higher number of samples/thresholds we should see the
        # accuracy improving
        self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
  """Tests for streaming_dynamic_auc.

  Unlike the bucketed streaming_auc, this metric stores every label and
  prediction seen so far (see testVars) and recomputes AUC exactly.
  """

  def setUp(self):
    super(StreamingDynamicAUCTest, self).setUp()
    # Fixed seed so the randomized tests below are reproducible.
    np.random.seed(1)
    ops.reset_default_graph()

  def testUnknownCurve(self):
    # Only 'ROC' and 'PR' are accepted curve arguments.
    with self.assertRaisesRegexp(
        ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
      metrics.streaming_dynamic_auc(
          labels=array_ops.ones((10, 1)),
          predictions=array_ops.ones((10, 1)),
          curve='TEST_CURVE')

  def testVars(self):
    metrics.streaming_dynamic_auc(
        labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
    # The metric's state is a growing array (plus size) for both labels
    # and predictions.
    _assert_metric_variables(self, [
        'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
        'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
    ])

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    auc, _ = metrics.streaming_dynamic_auc(
        labels=array_ops.ones((10, 1)),
        predictions=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertEqual(ops.get_collection(my_collection_name), [auc])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_dynamic_auc(
        labels=array_ops.ones((10, 1)),
        predictions=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor must not mutate the accumulated state.
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in xrange(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_auc = auc.eval()
      for _ in xrange(10):
        self.assertAlmostEqual(initial_auc, auc.eval(), 5)

  def testAllLabelsOnes(self):
    # With only one class present, AUC is reported as 0.
    with self.test_session() as sess:
      predictions = constant_op.constant([1., 1., 1.])
      labels = constant_op.constant([1, 1, 1])
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, auc.eval())

  def testAllLabelsZeros(self):
    with self.test_session() as sess:
      predictions = constant_op.constant([1., 1., 1.])
      labels = constant_op.constant([0, 0, 0])
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, auc.eval())

  def testNonZeroOnePredictions(self):
    # Predictions outside [0, 1] still rank correctly: perfect separation
    # gives AUC 1.0.
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
      labels = constant_op.constant([1, 0, 1, 0])
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(auc.eval(), 1.0)

  def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs)
      labels = constant_op.constant(inputs)
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(1, auc.eval())

  def testSomeCorrect(self):
    with self.test_session() as sess:
      predictions = constant_op.constant([1, 0, 1, 0])
      labels = constant_op.constant([0, 1, 1, 0])
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0.5, auc.eval())

  def testAllIncorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0, auc.eval())

  def testExceptionOnIncompatibleShapes(self):
    # Shape mismatch is a graph-construction error (ValueError), raised
    # while building the metric, before any session.run.
    with self.test_session() as sess:
      predictions = array_ops.ones([5])
      labels = array_ops.zeros([6])
      with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
        _, update_op = metrics.streaming_dynamic_auc(labels, predictions)
        sess.run(variables.local_variables_initializer())
        sess.run(update_op)

  def testExceptionOnGreaterThanOneLabel(self):
    # Out-of-range labels are only detected at runtime, when the update
    # op executes.
    with self.test_session() as sess:
      predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
      labels = constant_op.constant([2, 1, 0])
      _, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          '.*labels must be 0 or 1, at least one is >1.*'):
        sess.run(update_op)

  def testExceptionOnNegativeLabel(self):
    with self.test_session() as sess:
      predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
      labels = constant_op.constant([1, 0, -1])
      _, update_op = metrics.streaming_dynamic_auc(labels, predictions)
      sess.run(variables.local_variables_initializer())
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          '.*labels must be 0 or 1, at least one is <0.*'):
        sess.run(update_op)

  def testWithMultipleUpdates(self):
    # Feed many random batches through local variables and compare the
    # streamed result against a numpy reference computed over the full
    # accumulated sample.
    batch_size = 10
    num_batches = 100
    labels = np.array([])
    predictions = np.array([])
    tf_labels = variables.Variable(
        array_ops.ones(batch_size, dtypes_lib.int32),
        collections=[ops.GraphKeys.LOCAL_VARIABLES],
        dtype=dtypes_lib.int32)
    tf_predictions = variables.Variable(
        array_ops.ones(batch_size),
        collections=[ops.GraphKeys.LOCAL_VARIABLES],
        dtype=dtypes_lib.float32)
    auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      for _ in xrange(num_batches):
        new_labels = np.random.randint(0, 2, size=batch_size)
        noise = np.random.normal(0.0, scale=0.2, size=batch_size)
        # Predictions correlate with labels, so AUC should be > 0.5.
        new_predictions = 0.4 + 0.2 * new_labels + noise
        labels = np.concatenate([labels, new_labels])
        predictions = np.concatenate([predictions, new_predictions])
        sess.run(tf_labels.assign(new_labels))
        sess.run(tf_predictions.assign(new_predictions))
        sess.run(update_op)
      expected_auc = _np_auc(predictions, labels)
      # Exact match (default places) is expected because the metric is
      # not approximated by thresholds.
      self.assertAlmostEqual(expected_auc, auc.eval())

  def testAUCPRReverseIncreasingPredictions(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 1, 1])
      auc, update_op = metrics.streaming_dynamic_auc(
          labels, predictions, curve='PR')
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)

  def testAUCPRJumbledPredictions(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
      auc, update_op = metrics.streaming_dynamic_auc(
          labels, predictions, curve='PR')
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)

  def testAUCPRPredictionsLessThanHalf(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
          shape=(1, 7),
          dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
      auc, update_op = metrics.streaming_dynamic_auc(
          labels, predictions, curve='PR')
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
  """Tests for precision_recall_at_equal_thresholds.

  The metric returns a named-tuple result (tp/fp/tn/fn/precision/recall/
  thresholds); _testCase drives it with num_thresholds=3, i.e. thresholds
  [0.0, 0.5, 1.0].
  """

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def _testResultsEqual(self, expected_dict, gotten_result):
    """Tests that 2 results (dicts) represent the same data.

    Args:
      expected_dict: A dictionary with keys that are the names of properties
        of PrecisionRecallData and whose values are lists of floats.
      gotten_result: A PrecisionRecallData object.
    """
    gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
    self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
    for key, expected_values in expected_dict.items():
      self.assertAllClose(expected_values, gotten_dict[key])

  def _testCase(self, predictions, labels, expected_result, weights=None):
    """Performs a test given a certain scenario of labels, predictions, weights.

    Args:
      predictions: The predictions tensor. Of type float32.
      labels: The labels tensor. Of type bool.
      expected_result: The expected result (dict) that maps to tensors.
      weights: Optional weights tensor.
    """
    with self.test_session() as sess:
      predictions_tensor = constant_op.constant(
          predictions, dtype=dtypes_lib.float32)
      labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
      weights_tensor = None
      # NOTE(review): truthiness check — an empty weights list would be
      # silently ignored; `weights is not None` would be stricter. Confirm
      # no caller relies on this.
      if weights:
        weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
      gotten_result, update_op = (
          metric_ops.precision_recall_at_equal_thresholds(
              labels=labels_tensor,
              predictions=predictions_tensor,
              weights=weights_tensor,
              num_thresholds=3))
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self._testResultsEqual(expected_result, gotten_result)

  def testVars(self):
    # State is a pair of bucketed counters for true/false positives.
    metric_ops.precision_recall_at_equal_thresholds(
        labels=constant_op.constant([True], dtype=dtypes_lib.bool),
        predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
    _assert_metric_variables(
        self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
               'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))

  def testVarsWithName(self):
    # A user-supplied name replaces the default variable-scope prefix.
    metric_ops.precision_recall_at_equal_thresholds(
        labels=constant_op.constant([True], dtype=dtypes_lib.bool),
        predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
        name='foo')
    _assert_metric_variables(
        self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))

  def testValuesAreIdempotent(self):
    # Evaluating the result tensors must not change the accumulated state.
    predictions = constant_op.constant(
        np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
    result, update_op = metric_ops.precision_recall_at_equal_thresholds(
        labels=labels, predictions=predictions)
    with self.test_session() as sess:
      # Run several updates.
      sess.run(variables.local_variables_initializer())
      for _ in range(3):
        sess.run(update_op)
      # Then verify idempotency.
      initial_result = {
          k: value.eval().tolist()
          for k, value in result._asdict().items()
      }
      for _ in range(3):
        self._testResultsEqual(initial_result, result)

  def testAllTruePositives(self):
    self._testCase(
        [[1]], [[True]], {
            'tp': [1, 1, 1],
            'fp': [0, 0, 0],
            'tn': [0, 0, 0],
            'fn': [0, 0, 0],
            'precision': [1.0, 1.0, 1.0],
            'recall': [1.0, 1.0, 1.0],
            'thresholds': [0.0, 0.5, 1.0],
        })

  def testAllTrueNegatives(self):
    self._testCase(
        [[0]], [[False]], {
            'tp': [0, 0, 0],
            # At threshold 0.0 a prediction of 0 still counts as positive.
            'fp': [1, 0, 0],
            'tn': [0, 1, 1],
            'fn': [0, 0, 0],
            'precision': [0.0, 0.0, 0.0],
            'recall': [0.0, 0.0, 0.0],
            'thresholds': [0.0, 0.5, 1.0],
        })

  def testAllFalsePositives(self):
    self._testCase(
        [[1]], [[False]], {
            'tp': [0, 0, 0],
            'fp': [1, 1, 1],
            'tn': [0, 0, 0],
            'fn': [0, 0, 0],
            'precision': [0.0, 0.0, 0.0],
            'recall': [0.0, 0.0, 0.0],
            'thresholds': [0.0, 0.5, 1.0],
        })

  def testAllFalseNegatives(self):
    self._testCase(
        [[0]], [[True]], {
            'tp': [1, 0, 0],
            'fp': [0, 0, 0],
            'tn': [0, 0, 0],
            'fn': [0, 1, 1],
            'precision': [1.0, 0.0, 0.0],
            'recall': [1.0, 0.0, 0.0],
            'thresholds': [0.0, 0.5, 1.0],
        })

  def testManyValues(self):
    self._testCase(
        [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
        [[True, False, False, True, True, True]], {
            'tp': [4, 3, 0],
            'fp': [2, 0, 0],
            'tn': [0, 2, 2],
            'fn': [0, 1, 4],
            'precision': [2.0 / 3.0, 1.0, 0.0],
            'recall': [1.0, 0.75, 0.0],
            'thresholds': [0.0, 0.5, 1.0],
        })

  def testManyValuesWithWeights(self):
    # Same scenario as testManyValues, but counts are weighted sums.
    self._testCase(
        [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
        [[True, False, False, True, True, True]], {
            'tp': [1.5, 1.5, 0.0],
            'fp': [2.5, 0.0, 0.0],
            'tn': [0.0, 2.5, 2.5],
            'fn': [0.0, 0.0, 1.5],
            'precision': [0.375, 1.0, 0.0],
            'recall': [1.0, 1.0, 0.0],
            'thresholds': [0.0, 0.5, 1.0],
        },
        weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
class StreamingSpecificityAtSensitivityTest(test.TestCase):
  """Tests for streaming_specificity_at_sensitivity."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7)
    # State is the four confusion-matrix counters.
    _assert_metric_variables(self,
                             ('specificity_at_sensitivity/true_positives:0',
                              'specificity_at_sensitivity/false_negatives:0',
                              'specificity_at_sensitivity/false_positives:0',
                              'specificity_at_sensitivity/true_negatives:0'))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor must not change the accumulated state.
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_specificity = specificity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)

  def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
    labels = constant_op.constant(inputs)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, specificity.eval())

  def testSomeCorrectHighSensitivity(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.8)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1.0, sess.run(update_op))
      self.assertAlmostEqual(1.0, specificity.eval())

  def testSomeCorrectLowSensitivity(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, specificity.eval())

  def testWeighted1d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    # A single uniform weight; the expected value matches the unweighted
    # testSomeCorrectLowSensitivity case above.
    weights_values = [3]
    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    weights = constant_op.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, specificity.eval())

  def testWeighted2d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    # Per-sample weights skew the confusion-matrix counts.
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    weights = constant_op.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
      self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
  """Tests for streaming_sensitivity_at_specificity."""

  def setUp(self):
    # Deterministic random inputs and a fresh graph for every test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7)
    # State is the four confusion-matrix counters.
    expected_names = ('sensitivity_at_specificity/true_positives:0',
                      'sensitivity_at_specificity/false_negatives:0',
                      'sensitivity_at_specificity/false_positives:0',
                      'sensitivity_at_specificity/true_negatives:0')
    _assert_metric_variables(self, expected_names)

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    value_tensor, _ = metrics.streaming_sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name),
                         [value_tensor])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    # Reading the value tensor repeatedly must not change its value.
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several updates first.
      for _ in range(10):
        sess.run(update_op)
      # Repeated evaluations must all agree with the first.
      baseline = sensitivity.eval()
      for _ in range(10):
        self.assertAlmostEqual(baseline, sensitivity.eval(), 5)

  def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
    labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())

  def testSomeCorrectHighSpecificity(self):
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    predictions = constant_op.constant(pred_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(label_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())

  def testSomeCorrectLowSpecificity(self):
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    predictions = constant_op.constant(pred_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(label_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())

  def testWeighted(self):
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    predictions = constant_op.constant(pred_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(label_values)
    weights = constant_op.constant(weight_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, sensitivity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
  """Tests for streaming_precision_at_thresholds and
  streaming_recall_at_thresholds (thresholded confusion-matrix metrics)."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0])
    _assert_metric_variables(self, (
        'precision_at_thresholds/true_positives:0',
        'precision_at_thresholds/false_positives:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    prec, _ = metrics.streaming_precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    rec, _ = metrics.streaming_recall_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    # Both metrics should land in the collection, in creation order.
    self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, precision_op = metrics.streaming_precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    _, recall_op = metrics.streaming_recall_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    self.assertListEqual(
        ops.get_collection(my_collection_name), [precision_op, recall_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensors must not change the accumulated state.
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run([prec_op, rec_op])
      # Then verify idempotency.
      initial_prec = prec.eval()
      initial_rec = rec.eval()
      for _ in range(10):
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

  # TODO(nsilberman): fix tests (passing but incorrect).
  def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertEqual(1, prec.eval())
      self.assertEqual(1, rec.eval())

  def testSomeCorrect(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(0.5, prec.eval())
      self.assertAlmostEqual(0.5, rec.eval())

  def testAllIncorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(0, prec.eval())
      self.assertAlmostEqual(0, rec.eval())

  def testWeights1d(self):
    # Row 0 is zero-weighted, so only row 1 ([1, 0] vs [1, 0]) counts.
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = constant_op.constant(
          [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      prec_low = prec[0]
      prec_high = prec[1]
      rec_low = rec[0]
      rec_high = rec[1]
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testWeights2d(self):
    # Element-wise weights: same effective masking as testWeights1d.
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = constant_op.constant(
          [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      prec_low = prec[0]
      prec_high = prec[1]
      rec_low = rec[0]
      rec_high = rec[1]
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testExtremeThresholds(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)
      prec_low = prec[0]
      prec_high = prec[1]
      rec_low = rec[0]
      rec_high = rec[1]
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(0.75, prec_low.eval())
      self.assertAlmostEqual(0.0, prec_high.eval())
      self.assertAlmostEqual(1.0, rec_low.eval())
      self.assertAlmostEqual(0.0, rec_high.eval())

  def testZeroLabelsPredictions(self):
    # No positives anywhere: both metrics should report 0 rather than NaN.
    with self.test_session() as sess:
      predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
      labels = array_ops.zeros([4])
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])
      self.assertAlmostEqual(0, prec.eval(), 6)
      self.assertAlmostEqual(0, rec.eval(), 6)

  def testWithMultipleUpdates(self):
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)
    # Create the labels and data.
    labels = np.random.randint(0, 2, size=(num_samples, 1))
    noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
    predictions = 0.4 + 0.2 * labels + noise
    # Clip predictions into [0, 1].
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0
    thresholds = [0.3]
    # Compute the reference confusion-matrix counts in plain Python.
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    for i in range(num_samples):
      if predictions[i] > thresholds[0]:
        if labels[i] == 1:
          tp += 1
        else:
          fp += 1
      else:
        if labels[i] == 1:
          fn += 1
        else:
          tn += 1
    epsilon = 1e-7
    expected_prec = tp / (epsilon + tp + fp)
    expected_rec = tp / (epsilon + tp + fn)
    labels = labels.astype(np.float32)
    predictions = predictions.astype(np.float32)
    with self.test_session() as sess:
      # Reshape the data so its easy to queue up:
      # NOTE(review): the C-order reshape interleaves samples across
      # columns, but since the metric only accumulates totals over all
      # samples the grouping should not affect the final value.
      predictions_batches = predictions.reshape((batch_size, num_batches))
      labels_batches = labels.reshape((batch_size, num_batches))
      # Enqueue the data:
      predictions_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      labels_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(int(num_batches)):
        tf_prediction = constant_op.constant(predictions_batches[:, i])
        tf_label = constant_op.constant(labels_batches[:, i])
        sess.run([
            predictions_queue.enqueue(tf_prediction),
            labels_queue.enqueue(tf_label)
        ])
      tf_predictions = predictions_queue.dequeue()
      tf_labels = labels_queue.dequeue()
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          tf_predictions, tf_labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          tf_predictions, tf_labels, thresholds)
      sess.run(variables.local_variables_initializer())
      for _ in range(int(num_samples / batch_size)):
        sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6 digits match.
      # Although with higher number of samples/thresholds we should see the
      # accuracy improving
      self.assertAlmostEqual(expected_prec, prec.eval(), 2)
      self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
  def setUp(self):
    """Reset the random seed and the default graph before each test."""
    np.random.seed(1)
    ops.reset_default_graph()
  def testVars(self):
    """The metric creates false-positive and true-negative counters."""
    metrics.streaming_false_positive_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0])
    _assert_metric_variables(self, (
        'false_positive_rate_at_thresholds/false_positives:0',
        'false_positive_rate_at_thresholds/true_negatives:0',
    ))
  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_false_positive_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change the accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    thresholds = [0, 0.5, 1.0]
    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        predictions, labels, thresholds)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(fpr_op)
      # Then verify idempotency.
      initial_fpr = fpr.eval()
      for _ in range(10):
        self.assertAllClose(initial_fpr, fpr.eval())
  def testAllCorrect(self):
    """Perfect predictions yield a false-positive rate of 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      thresholds = [0.5]
      fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run(fpr_op)
      self.assertEqual(0, fpr.eval())
  def testSomeCorrect(self):
    """One false positive out of two negatives gives FPR 0.5."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      thresholds = [0.5]
      fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run(fpr_op)
      self.assertAlmostEqual(0.5, fpr.eval())
  def testAllIncorrect(self):
    """Completely inverted predictions yield a false-positive rate of 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      thresholds = [0.5]
      fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
          predictions, labels, thresholds)
      sess.run(variables.local_variables_initializer())
      sess.run(fpr_op)
      self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
  """Rank-1 per-row weights: row 0 is masked out, row 1 has no FP."""
  with self.test_session() as sess:
    preds = constant_op.constant(
        [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
    targets = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    row_weights = constant_op.constant(
        [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        preds, targets, [0.5, 1.1], weights=row_weights)
    fpr_low, fpr_high = fpr[0], fpr[1]

    sess.run(variables.local_variables_initializer())
    sess.run(fpr_op)

    # Only row 1 carries weight and it contains no false positives,
    # at either threshold.
    self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
    self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
  """Rank-2 per-element weights: row 0 is masked out, row 1 has no FP."""
  with self.test_session() as sess:
    preds = constant_op.constant(
        [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
    targets = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    elem_weights = constant_op.constant(
        [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        preds, targets, [0.5, 1.1], weights=elem_weights)
    fpr_low, fpr_high = fpr[0], fpr[1]

    sess.run(variables.local_variables_initializer())
    sess.run(fpr_op)

    # Only row 1's elements carry weight; no false positives there.
    self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
    self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
  """Thresholds below/above every prediction give FPR of 1 and 0."""
  with self.test_session() as sess:
    preds = constant_op.constant(
        [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
    targets = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
    # lower/higher than any values
    extreme_thresholds = [-1.0, 2.0]
    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        preds, targets, extreme_thresholds)
    fpr_low, fpr_high = fpr[0], fpr[1]

    sess.run(variables.local_variables_initializer())
    sess.run(fpr_op)

    # At -1.0 everything is predicted positive: the lone negative is a FP.
    self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
    # At 2.0 nothing is predicted positive: no FP possible.
    self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
  """All-zero predictions and labels produce a FPR of 0."""
  with self.test_session() as sess:
    preds = array_ops.zeros([4], dtype=dtypes_lib.float32)
    targets = array_ops.zeros([4])
    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        preds, targets, [0.5])

    sess.run(variables.local_variables_initializer())
    sess.run(fpr_op)
    self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
  """Streams many batches through queues and checks the accumulated FPR
  against a reference value computed directly in numpy."""
  num_samples = 1000
  batch_size = 10
  num_batches = int(num_samples / batch_size)

  # Create the labels and data. Predictions are noisy scores correlated
  # with the binary labels, clipped into [0, 1].
  labels = np.random.randint(0, 2, size=(num_samples, 1))
  noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
  predictions = 0.4 + 0.2 * labels + noise
  predictions[predictions > 1] = 1
  predictions[predictions < 0] = 0
  thresholds = [0.3]

  # Reference computation: count false positives and true negatives
  # at the single threshold.
  fp = 0
  tn = 0
  for i in range(num_samples):
    if predictions[i] > thresholds[0]:
      if labels[i] == 0:
        fp += 1
    else:
      if labels[i] == 0:
        tn += 1
  # Matches the epsilon the streaming metric uses to avoid divide-by-zero.
  epsilon = 1e-7
  expected_fpr = fp / (epsilon + fp + tn)

  labels = labels.astype(np.float32)
  predictions = predictions.astype(np.float32)

  with self.test_session() as sess:
    # Reshape the data so it's easy to queue up:
    predictions_batches = predictions.reshape((batch_size, num_batches))
    labels_batches = labels.reshape((batch_size, num_batches))

    # Enqueue the data:
    predictions_queue = data_flow_ops.FIFOQueue(
        num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
    labels_queue = data_flow_ops.FIFOQueue(
        num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
    for i in range(int(num_batches)):
      tf_prediction = constant_op.constant(predictions_batches[:, i])
      tf_label = constant_op.constant(labels_batches[:, i])
      sess.run([
          predictions_queue.enqueue(tf_prediction),
          labels_queue.enqueue(tf_label)
      ])
    tf_predictions = predictions_queue.dequeue()
    tf_labels = labels_queue.dequeue()

    fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
        tf_predictions, tf_labels, thresholds)

    # One update per queued batch; the metric accumulates across them.
    sess.run(variables.local_variables_initializer())
    for _ in range(int(num_samples / batch_size)):
      sess.run(fpr_op)
    # Since this is only approximate, we can't expect a 6 digits match.
    # Although with higher number of samples/thresholds we should see the
    # accuracy improving
    self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
  """Tests for metrics.recall_at_precision."""

  def setUp(self):
    # Deterministic data and a fresh graph for every test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates the four confusion-matrix count variables."""
    metrics.recall_at_precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        precision=0.7)
    _assert_metric_variables(self, ('recall_at_precision/true_positives:0',
                                    'recall_at_precision/false_negatives:0',
                                    'recall_at_precision/false_positives:0',
                                    'recall_at_precision/true_negatives:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.recall_at_precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        precision=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.recall_at_precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        precision=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    recall, update_op = metrics.recall_at_precision(
        labels, predictions, precision=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_recall = recall.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_recall, recall.eval(), 5)

  def testAllCorrect(self):
    """Perfect predictions achieve recall 1 at precision 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
    labels = constant_op.constant(inputs)
    recall, update_op = metrics.recall_at_precision(
        labels, predictions, precision=1.0)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, recall.eval())

  def testSomeCorrectHighPrecision(self):
    """Recall at an achievable high (0.8) precision operating point."""
    predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
    labels_values = [1, 1, 1, 1, 0, 0, 0, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    recall, update_op = metrics.recall_at_precision(
        labels, predictions, precision=0.8)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, recall.eval())

  def testSomeCorrectLowPrecision(self):
    """Recall at a low (0.4) precision operating point."""
    predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
    labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    recall, update_op = metrics.recall_at_precision(
        labels, predictions, precision=0.4)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # 2 of the 3 positive labels are recovered at this operating point.
      target_recall = 2.0 / 3.0
      self.assertAlmostEqual(target_recall, sess.run(update_op))
      self.assertAlmostEqual(target_recall, recall.eval())

  def testWeighted(self):
    """Per-example weights change which threshold meets the target
    precision."""
    predictions_values = [1, .9, .8, .7, .6]
    labels_values = [1, 1, 0, 0, 1]
    weights_values = [1, 1, 3, 4, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    weights = constant_op.constant(weights_values)
    recall, update_op = metrics.recall_at_precision(
        labels, predictions, weights=weights, precision=0.4)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      target_recall = 2.0 / 3.0
      self.assertAlmostEqual(target_recall, sess.run(update_op))
      self.assertAlmostEqual(target_recall, recall.eval())
class StreamingFNRThresholdsTest(test.TestCase):
  """Tests for metrics.streaming_false_negative_rate_at_thresholds."""

  def setUp(self):
    # Deterministic data and a fresh graph for every test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates exactly the FN/TP count variables."""
    metrics.streaming_false_negative_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0])
    _assert_metric_variables(self, (
        'false_negative_rate_at_thresholds/false_negatives:0',
        'false_negative_rate_at_thresholds/true_positives:0',
    ))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [fnr])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_false_negative_rate_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
    thresholds = [0, 0.5, 1.0]
    fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
        predictions, labels, thresholds)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(fnr_op)

      # Then verify idempotency.
      initial_fnr = fnr.eval()
      for _ in range(10):
        self.assertAllClose(initial_fnr, fnr.eval())

  def testAllCorrect(self):
    """Perfect predictions yield a FNR of 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      thresholds = [0.5]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertEqual(0, fnr.eval())

  def testSomeCorrect(self):
    """One of the two positive labels is missed -> FNR 0.5."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
      thresholds = [0.5]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertAlmostEqual(0.5, fnr.eval())

  def testAllIncorrect(self):
    """Labels inverted relative to the predictions yield a FNR of 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      thresholds = [0.5]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertAlmostEqual(1, fnr.eval())

  def testWeights1d(self):
    """Rank-1 per-row weights mask out the first example row."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = constant_op.constant(
          [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
      thresholds = [0.5, 1.1]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      fnr_low = fnr[0]
      fnr_high = fnr[1]

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
      # Threshold 1.1 is above every prediction, so the weighted positive
      # is always missed.
      self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)

  def testWeights2d(self):
    """Rank-2 per-element weights mask out the first example row."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = constant_op.constant(
          [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
      thresholds = [0.5, 1.1]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      fnr_low = fnr[0]
      fnr_high = fnr[1]

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
      self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)

  def testExtremeThresholds(self):
    """Thresholds below/above every prediction give FNR of 0 and 1."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds)
      fnr_low = fnr[0]
      fnr_high = fnr[1]

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      # At -1.0 everything is predicted positive: no positive is missed.
      self.assertAlmostEqual(0.0, fnr_low.eval())
      # At 2.0 nothing is predicted positive: every positive is missed.
      self.assertAlmostEqual(1.0, fnr_high.eval())

  def testZeroLabelsPredictions(self):
    """All-zero labels contain no positives, so FNR is 0."""
    with self.test_session() as sess:
      predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
      labels = array_ops.zeros([4])
      thresholds = [0.5]
      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          predictions, labels, thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run(fnr_op)

      self.assertAlmostEqual(0, fnr.eval(), 6)

  def testWithMultipleUpdates(self):
    """Streams many batches through queues and checks the accumulated FNR
    against a reference value computed directly in numpy."""
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=(num_samples, 1))
    noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0
    thresholds = [0.3]

    # Reference computation: count false negatives and true positives.
    fn = 0
    tp = 0
    for i in range(num_samples):
      if predictions[i] > thresholds[0]:
        if labels[i] == 1:
          tp += 1
      else:
        if labels[i] == 1:
          fn += 1
    # Matches the epsilon the streaming metric uses to avoid divide-by-zero.
    epsilon = 1e-7
    expected_fnr = fn / (epsilon + fn + tp)

    labels = labels.astype(np.float32)
    predictions = predictions.astype(np.float32)

    with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
      predictions_batches = predictions.reshape((batch_size, num_batches))
      labels_batches = labels.reshape((batch_size, num_batches))

      # Enqueue the data:
      predictions_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      labels_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(int(num_batches)):
        tf_prediction = constant_op.constant(predictions_batches[:, i])
        tf_label = constant_op.constant(labels_batches[:, i])
        sess.run([
            predictions_queue.enqueue(tf_prediction),
            labels_queue.enqueue(tf_label)
        ])
      tf_predictions = predictions_queue.dequeue()
      tf_labels = labels_queue.dequeue()

      fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
          tf_predictions, tf_labels, thresholds)

      # One update per queued batch; the metric accumulates across them.
      sess.run(variables.local_variables_initializer())
      for _ in range(int(num_samples / batch_size)):
        sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6 digits match.
      # Although with higher number of samples/thresholds we should see the
      # accuracy improving
      self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
  """Validates that the deprecated `streaming_recall_at_k` yields the same
  results as `streaming_sparse_recall_at_k` on identical data."""

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

    self._batch_size = 4
    self._num_classes = 3
    # Row i holds the per-class scores for example i.
    self._np_predictions = np.matrix(('0.1 0.2 0.7;'
                                      '0.6 0.2 0.2;'
                                      '0.0 0.9 0.1;'
                                      '0.2 0.0 0.8'))
    # The true class of every example is 0.
    self._np_labels = [0, 0, 0, 0]

  def testVars(self):
    """The metric creates count/total accumulator variables."""
    metrics.streaming_recall_at_k(
        predictions=array_ops.ones((self._batch_size, self._num_classes)),
        labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
        k=1)
    _assert_metric_variables(self,
                             ('recall_at_1/count:0', 'recall_at_1/total:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_recall_at_k(
        predictions=array_ops.ones((self._batch_size, self._num_classes)),
        labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
        k=1,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_recall_at_k(
        predictions=array_ops.ones((self._batch_size, self._num_classes)),
        labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
        k=1,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testSingleUpdateKIs1(self):
    """Only 1 of the 4 rows ranks the true class in its top 1."""
    predictions = constant_op.constant(
        self._np_predictions,
        shape=(self._batch_size, self._num_classes),
        dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
    recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0.25, sess.run(update_op))
      self.assertEqual(0.25, recall.eval())
      self.assertEqual(0.25, sess.run(sp_update_op))
      self.assertEqual(0.25, sp_recall.eval())

  def testSingleUpdateKIs2(self):
    """2 of the 4 rows rank the true class in their top 2."""
    predictions = constant_op.constant(
        self._np_predictions,
        shape=(self._batch_size, self._num_classes),
        dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
    recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, recall.eval())
      self.assertEqual(0.5, sess.run(sp_update_op))
      self.assertEqual(0.5, sp_recall.eval())

  def testSingleUpdateKIs3(self):
    """k equals the number of classes, so every row is a hit."""
    predictions = constant_op.constant(
        self._np_predictions,
        shape=(self._batch_size, self._num_classes),
        dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
    recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())

  def testSingleUpdateSomeMissingKIs2(self):
    """Weights keep only rows 1 and 3, both of which are hits at k=2."""
    predictions = constant_op.constant(
        self._np_predictions,
        shape=(self._batch_size, self._num_classes),
        dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
    weights = constant_op.constant(
        [0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, weights=weights)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions,
        array_ops.reshape(labels, (self._batch_size, 1)),
        k=2,
        weights=weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
                                          predictions,
                                          labels,
                                          k,
                                          expected,
                                          class_id=None,
                                          weights=None):
  """Asserts streaming_sparse_precision_at_k evaluates to `expected`.

  Builds the metric in a fresh graph, verifies evaluation fails before
  the local variables are initialized, then checks both the update op
  and the value tensor against `expected` (NaN-aware).
  """
  with ops.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    metric, update = metrics.streaming_sparse_precision_at_k(
        predictions=constant_op.constant(predictions, dtypes_lib.float32),
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)

    # Fails without initialized vars.
    self.assertRaises(errors_impl.OpError, metric.eval)
    self.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # Run per-step op and assert expected values.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertEqual(expected, update.eval())
      self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
                                              top_k_predictions,
                                              labels,
                                              expected,
                                              class_id=None,
                                              weights=None):
  """Asserts streaming_sparse_precision_at_top_k evaluates to `expected`.

  Builds the metric in a fresh graph, verifies evaluation fails before
  the local variables are initialized, then checks both the update op
  and the value tensor against `expected` (NaN-aware).
  """
  with ops.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    metric, update = metrics.streaming_sparse_precision_at_top_k(
        top_k_predictions=constant_op.constant(top_k_predictions,
                                               dtypes_lib.int32),
        labels=labels,
        class_id=class_id,
        weights=weights)

    # Fails without initialized vars.
    self.assertRaises(errors_impl.OpError, metric.eval)
    self.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # Run per-step op and assert expected values. Use the shared NaN
    # helper for consistency with the other _test_streaming_* helpers
    # in this class (was an inline assertTrue(math.isnan(...))).
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertEqual(expected, update.eval())
      self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
                                                  predictions,
                                                  labels,
                                                  k,
                                                  expected,
                                                  weights=None):
  """Asserts streaming_sparse_average_precision_at_k yields `expected`."""
  with ops.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    predictions = constant_op.constant(predictions, dtypes_lib.float32)
    metric, update = metrics.streaming_sparse_average_precision_at_k(
        predictions, labels, k, weights=weights)

    # Evaluating before the local variables are initialized must fail.
    self.assertRaises(errors_impl.OpError, metric.eval)
    self.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # One update step; both tensors then report the expected value,
    # with NaN expectations handled via the shared helper.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertAlmostEqual(expected, update.eval())
      self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
                                                      top_k_predictions,
                                                      labels,
                                                      expected,
                                                      weights=None):
  """Asserts streaming_sparse_average_precision_at_top_k yields `expected`."""
  with ops.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    metric, update = metrics.streaming_sparse_average_precision_at_top_k(
        top_k_predictions, labels, weights=weights)

    # Evaluating before the local variables are initialized must fail.
    self.assertRaises(errors_impl.OpError, metric.eval)
    self.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # One update step; both tensors then report the expected value,
    # with NaN expectations handled via the shared helper.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertAlmostEqual(expected, update.eval())
      self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
  """A rank-1 top_k_predictions argument must raise ValueError."""
  with self.test_session():
    # top_k_predictions has rank < 2.
    top_k_predictions = [9, 4, 6, 2, 0]
    sp_labels = sparse_tensor.SparseTensorValue(
        indices=np.array([[0], [1], [2]], np.int64),
        values=np.array([2, 7, 8], np.int64),
        dense_shape=np.array([10], np.int64))

    with self.assertRaises(ValueError):
      precision, _ = metrics.streaming_sparse_precision_at_top_k(
          top_k_predictions=constant_op.constant(top_k_predictions,
                                                 dtypes_lib.int64),
          labels=sp_labels)
      variables.variables_initializer(variables.local_variables()).run()
      precision.eval()
def test_average_precision(self):
  """Checks precision@k and average-precision@k on two hand-worked
  examples, their unweighted combination, and a weighted combination."""
  # Example 1.
  # Matches example here:
  # fastml.com/what-you-wanted-to-know-about-mean-average-precision
  labels_ex1 = (0, 1, 2, 3, 4)
  labels = np.array([labels_ex1], dtype=np.int64)
  predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
  predictions = (predictions_ex1,)
  # Class indices of predictions_ex1 sorted by descending score.
  predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
  precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
  avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
                       (precision_ex1[1] + precision_ex1[3]) / 4)
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=precision_ex1[i])
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=avg_precision_ex1[i])
    self._test_streaming_sparse_average_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])

  # Example 2.
  labels_ex2 = (0, 2, 4, 5, 6)
  labels = np.array([labels_ex2], dtype=np.int64)
  predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
  predictions = (predictions_ex2,)
  predictions_top_k_ex2 = (1, 3, 0, 6, 5)
  precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
  avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
                       (precision_ex2[2] + precision_ex2[3]) / 4)
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=precision_ex2[i])
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=avg_precision_ex2[i])
    self._test_streaming_sparse_average_precision_at_top_k(
        (predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])

  # Both examples, we expect both precision and average precision to be the
  # average of the 2 examples.
  labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
  predictions = (predictions_ex1, predictions_ex2)
  streaming_precision = [
      (ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
  ]
  streaming_average_precision = [
      (ex1 + ex2) / 2
      for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
  ]
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=streaming_precision[i])
    predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
    self._test_streaming_sparse_precision_at_top_k(
        predictions_top_k, labels, expected=streaming_precision[i])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=streaming_average_precision[i])
    self._test_streaming_sparse_average_precision_at_top_k(
        predictions_top_k, labels, expected=streaming_average_precision[i])

  # Weighted examples, we expect streaming average precision to be the
  # weighted average of the 2 examples.
  weights = (0.3, 0.6)
  streaming_average_precision = [
      (weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
      for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
  ]
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_average_precision_at_k(
        predictions,
        labels,
        k,
        expected=streaming_average_precision[i],
        weights=weights)
    self._test_streaming_sparse_average_precision_at_top_k(
        (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
        labels,
        expected=streaming_average_precision[i],
        weights=weights)
def test_average_precision_some_labels_out_of_range(self):
  """Tests that labels outside the [0, n_classes) range are ignored."""
  # Same data as example 1 in test_average_precision, plus the invalid
  # labels -1 and 7; the expected values are unchanged.
  labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
  labels = np.array([labels_ex1], dtype=np.int64)
  predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
  predictions = (predictions_ex1,)
  predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
  precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
  avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
                       (precision_ex1[1] + precision_ex1[3]) / 4)
  for k in xrange(1, 5):
    expected_p = precision_ex1[k - 1]
    expected_ap = avg_precision_ex1[k - 1]
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=expected_p)
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=expected_p)
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=expected_ap)
    self._test_streaming_sparse_average_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=expected_ap)
def test_average_precision_at_top_k_static_shape_check(self):
  """Invalid static shapes of top_k predictions raise ValueError."""
  labels = np.array(((1,), (2,)), dtype=np.int64)

  # Fails due to non-static predictions_idx shape.
  placeholder_predictions = array_ops.placeholder(
      shape=(2, None), dtype=dtypes_lib.int64)
  with self.assertRaises(ValueError):
    metric_ops.streaming_sparse_average_precision_at_top_k(
        placeholder_predictions, labels)

  # Fails since rank of predictions_idx is less than one.
  with self.assertRaises(ValueError):
    metric_ops.streaming_sparse_average_precision_at_top_k((2, 1), labels)

  # Valid static shape.
  metric_ops.streaming_sparse_average_precision_at_top_k(((2,), (1,)),
                                                         labels)
def test_one_label_at_k1_nan(self):
  """Precision@1 is NaN for unpredicted or out-of-range classes."""
  predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k_predictions = [[3], [3]]
  sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
                                                    [0, 0, 1, 0]])
  dense_labels = np.array([[3], [2]], dtype=np.int64)

  # Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
  nan_class_ids = (-1, 0, 1, 2, 4)
  for labels in (sparse_labels, dense_labels):
    for cid in nan_class_ids:
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=1, expected=NAN, class_id=cid)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=NAN, class_id=cid)
def test_one_label_at_k1(self):
  """Precision@1 with a single relevant label per example."""
  predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k_predictions = [[3], [3]]
  sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
                                                    [0, 0, 1, 0]])
  dense_labels = np.array([[3], [2]], dtype=np.int64)

  one_of_two = 1.0 / 2
  for labels in (sparse_labels, dense_labels):
    # Class 3: 1 label, 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=1, expected=one_of_two, class_id=3)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=one_of_two, class_id=3)

    # All classes: 2 labels, 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=1, expected=one_of_two)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=one_of_two)
def test_three_labels_at_k5_no_predictions(self):
  """Precision@5 is NaN for classes absent from the top-5 predictions."""
  predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                 [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sparse_labels = _binary_2d_label_to_sparse_value(
      [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
  dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

  # Classes 1,3,8 have 0 predictions; classes -1 and 10 are out of range.
  nan_class_ids = (-1, 1, 3, 8, 10)
  for labels in (sparse_labels, dense_labels):
    for cid in nan_class_ids:
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=5, expected=NAN, class_id=cid)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=NAN, class_id=cid)
def test_three_labels_at_k5_no_labels(self):
  """Precision@5 is 0 for predicted classes that have no labels."""
  predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                 [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sparse_labels = _binary_2d_label_to_sparse_value(
      [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
  dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

  # Classes 0,4,6,9: 0 labels, >=1 prediction.
  unlabeled_class_ids = (0, 4, 6, 9)
  for labels in (sparse_labels, dense_labels):
    for cid in unlabeled_class_ids:
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=5, expected=0.0, class_id=cid)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=0.0, class_id=cid)
def test_three_labels_at_k5(self):
  """Per-class and overall precision at k=5, sparse and dense labels."""
  probs = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
           [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
  label_variants = (
      _binary_2d_label_to_sparse_value(
          [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]]),
      np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64))
  # (class_id, expected): class 2 has 2 of 2 predictions correct, class 5
  # has 1 of 1, class 7 has 0 of 1; across all classes 3 of 10 predictions
  # are correct. class_id=None exercises the all-classes default.
  cases = ((2, 2.0 / 2), (5, 1.0 / 1), (7, 0.0 / 1), (None, 3.0 / 10))
  for labels in label_variants:
    for class_id, expected in cases:
      self._test_streaming_sparse_precision_at_k(
          probs, labels, k=5, expected=expected, class_id=class_id)
      self._test_streaming_sparse_precision_at_top_k(
          top_k, labels, expected=expected, class_id=class_id)
def test_three_labels_at_k5_some_out_of_range(self):
  """Tests that labels outside the [0, n_classes) range are ignored."""
  probs = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
           [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
  sp_labels = sparse_tensor.SparseTensorValue(
      indices=[[0, 0], [0, 1], [0, 2], [0, 3],
               [1, 0], [1, 1], [1, 2], [1, 3]],
      # values -1 and 10 are outside the [0, n_classes) range; precision
      # must ignore them entirely.
      values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
      dense_shape=[2, 4])
  # (class_id, expected): 2 -> 2/2 correct, 5 -> 1/1, 7 -> 0/1, and
  # overall 3 of 10 predictions correct.
  for class_id, expected in ((2, 2.0 / 2), (5, 1.0 / 1), (7, 0.0 / 1),
                             (None, 3.0 / 10)):
    self._test_streaming_sparse_precision_at_k(
        probs, sp_labels, k=5, expected=expected, class_id=class_id)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, sp_labels, expected=expected, class_id=class_id)
def test_3d_nan(self):
  """3-D inputs: precision is NaN for unpredicted / out-of-range classes."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Classes 1, 3 and 8 get no predictions; -1 and 10 are out of range.
  for class_id in (-1, 1, 3, 8, 10):
    self._test_streaming_sparse_precision_at_k(
        probs, labels, k=5, expected=NAN, class_id=class_id)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
  """3-D inputs: precision is 0 for predicted-but-unlabeled classes."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Classes 0, 4, 6 and 9: at least one prediction each, zero labels.
  for class_id in (0, 4, 6, 9):
    self._test_streaming_sparse_precision_at_k(
        probs, labels, k=5, expected=0.0, class_id=class_id)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, labels, expected=0.0, class_id=class_id)
def test_3d(self):
  """3-D inputs: per-class and overall precision at k=5."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # (class_id, expected): class 2 has 4/4 predictions correct, class 5 has
  # 2/2, class 7 has 1/2; across all classes 7 of 20 are correct.
  cases = ((2, 4.0 / 4), (5, 2.0 / 2), (7, 1.0 / 2), (None, 7.0 / 20))
  for class_id, expected in cases:
    self._test_streaming_sparse_precision_at_k(
        probs, labels, k=5, expected=expected, class_id=class_id)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, labels, expected=expected, class_id=class_id)
def test_3d_ignore_all(self):
  """All-zero weights mask every update, leaving precision NaN."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Both per-batch and per-element zero weight shapes must be honored.
  zero_row = [[0], [0]]
  zero_mat = [[0, 0], [0, 0]]
  for class_id in xrange(10):
    for wts in (zero_row, zero_mat):
      self._test_streaming_sparse_precision_at_k(
          probs, labels, k=5, expected=NAN, class_id=class_id, weights=wts)
      self._test_streaming_sparse_precision_at_top_k(
          top_k, labels, expected=NAN, class_id=class_id, weights=wts)
  # Same holds without a class restriction.
  for wts in (zero_row, zero_mat):
    self._test_streaming_sparse_precision_at_k(
        probs, labels, k=5, expected=NAN, weights=wts)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, labels, expected=NAN, weights=wts)
def test_3d_ignore_some(self):
  """Partial weights mask a subset of examples from the precision update."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Each case: (class_id, expected precision, weights).
  cases = (
      (2, 2.0 / 2.0, [[1], [0]]),        # batch 0 only: 2 predictions, both correct.
      (2, 2.0 / 2.0, [[0], [1]]),        # batch 1 only: 2 predictions, both correct.
      (7, 0.0 / 1.0, [[1], [0]]),        # batch 0 only: 1 incorrect prediction.
      (7, 1.0 / 1.0, [[0], [1]]),        # batch 1 only: 1 correct prediction.
      (7, NAN, [[1, 0], [0, 1]]),        # kept elements yield no predictions.
      (7, 1.0 / 2.0, [[0, 1], [1, 0]]),  # 2 predictions, 1 correct.
  )
  for class_id, expected, wts in cases:
    self._test_streaming_sparse_precision_at_k(
        probs, labels, k=5, expected=expected, class_id=class_id,
        weights=wts)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, labels, expected=expected, class_id=class_id, weights=wts)
def test_sparse_tensor_value(self):
  """streaming_sparse_precision_at_k accepts a SparseTensorValue label."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  label_rows = [[0, 0, 0, 1], [0, 0, 1, 0]]
  with self.test_session():
    _, precision = metrics.streaming_sparse_precision_at_k(
        predictions=constant_op.constant(probs, dtypes_lib.float32),
        labels=_binary_2d_label_to_sparse_value(label_rows),
        k=1)
    variables.variables_initializer(variables.local_variables()).run()
    # Only the first example's top-1 prediction matches its label.
    self.assertEqual(0.5, precision.eval())
class StreamingSparseRecallTest(test.TestCase):

  def _test_streaming_sparse_recall_at_k(self,
                                         predictions,
                                         labels,
                                         k,
                                         expected,
                                         class_id=None,
                                         weights=None):
    """Builds streaming_sparse_recall_at_k in a fresh graph and checks it."""
    with ops.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = constant_op.constant(weights, dtypes_lib.float32)
      value_op, update_op = metrics.streaming_sparse_recall_at_k(
          predictions=constant_op.constant(predictions, dtypes_lib.float32),
          labels=labels,
          k=k,
          class_id=class_id,
          weights=weights)

      # Both tensors must fail before local variables are initialized.
      self.assertRaises(errors_impl.OpError, value_op.eval)
      self.assertRaises(errors_impl.OpError, update_op.eval)
      variables.variables_initializer(variables.local_variables()).run()

      # A single update must already produce the expected value, and the
      # value tensor must agree with it.
      if math.isnan(expected):
        _assert_nan(self, update_op.eval())
        _assert_nan(self, value_op.eval())
      else:
        self.assertEqual(expected, update_op.eval())
        self.assertEqual(expected, value_op.eval())
def _test_sparse_recall_at_top_k(self,
                                 labels,
                                 top_k_predictions,
                                 expected,
                                 class_id=None,
                                 weights=None):
  """Builds sparse_recall_at_top_k in a fresh graph and verifies it.

  Args mirror metric_ops.sparse_recall_at_top_k; `expected` may be NaN,
  in which case both the update and value tensors must evaluate to NaN.
  """
  with ops.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    metric, update = metric_ops.sparse_recall_at_top_k(
        labels=labels,
        top_k_predictions=constant_op.constant(top_k_predictions,
                                               dtypes_lib.int32),
        class_id=class_id,
        weights=weights)

    # Fails without initialized vars.
    self.assertRaises(errors_impl.OpError, metric.eval)
    self.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # Run per-step op and assert expected values. Use the shared
    # _assert_nan helper for consistency with the other metric test
    # helpers in this file (it also gives a clearer failure message than
    # a bare assertTrue on math.isnan).
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertEqual(expected, update.eval())
      self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
  """Recall is NaN for classes with no labels and for out-of-range ids."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value([[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64))
  # Classes 0 and 1 have no labels and no predictions; -1 and 4 are
  # outside the [0, n_classes) range.
  for labels in label_variants:
    for class_id in (-1, 0, 1, 4):
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=1, expected=NAN, class_id=class_id)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
  """Recall is 0 when a labeled class receives no predictions."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value([[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64))
  for labels in label_variants:
    # Class 2 is labeled once but never predicted.
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=1, expected=0.0, class_id=2)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
  """Per-class and overall recall at k=1."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value([[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64))
  for labels in label_variants:
    # Class 3: its single label is recovered by the top-1 prediction.
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=1, expected=1.0 / 1, class_id=3)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=1.0 / 1, class_id=3)
    # All classes: 2 labels in total, 1 of them recovered.
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=1, expected=1.0 / 2)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
  """Recall at k=1 under a variety of scalar and per-example weights."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value([[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64))
  # Class 3 is labeled only in example 0, and that label is recovered. Any
  # weighting that zeroes example 0 leaves no labeled examples (NaN);
  # otherwise recall is weight_0 / weight_0 == 1. Each case is
  # (expected, weights).
  class3_cases = (
      (NAN, (0.0,)),
      (1.0 / 1, (1.0,)),
      (1.0 / 1, (2.0,)),
      (NAN, (0.0, 0.0)),
      (NAN, (0.0, 1.0)),
      (1.0 / 1, (1.0, 0.0)),
      (1.0 / 1, (1.0, 1.0)),
      (2.0 / 2, (2.0, 3.0)),
      (3.0 / 3, (3.0, 2.0)),
      (0.3 / 0.3, (0.3, 0.6)),
      (0.6 / 0.6, (0.6, 0.3)),
  )
  # Across all classes there are 2 labels and only example 0's label is
  # recovered, so recall is weight_0 / (weight_0 + weight_1).
  overall_cases = (
      (NAN, (0.0,)),
      (1.0 / 2, (1.0,)),
      (1.0 / 2, (2.0,)),
      (1.0 / 1, (1.0, 0.0)),
      (0.0 / 1, (0.0, 1.0)),
      (1.0 / 2, (1.0, 1.0)),
      (2.0 / 5, (2.0, 3.0)),
      (3.0 / 5, (3.0, 2.0)),
      (0.3 / 0.9, (0.3, 0.6)),
      (0.6 / 0.9, (0.6, 0.3)),
  )
  for labels in label_variants:
    for expected, wts in class3_cases:
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=1, expected=expected, class_id=3, weights=wts)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=expected, class_id=3, weights=wts)
    for expected, wts in overall_cases:
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=1, expected=expected, weights=wts)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=expected, weights=wts)
def test_three_labels_at_k5_nan(self):
  """Recall is NaN for unlabeled classes and out-of-range ids."""
  probs = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
           [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
  label_variants = (
      _binary_2d_label_to_sparse_value(
          [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]]),
      np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64))
  for labels in label_variants:
    # Classes 0, 3, 4, 6 and 9 carry no labels; class 10 is out of range.
    for class_id in (0, 3, 4, 6, 9, 10):
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=5, expected=NAN, class_id=class_id)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
  """Recall is 0 for a labeled class with no top-5 predictions."""
  probs = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
           [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
  label_variants = (
      _binary_2d_label_to_sparse_value(
          [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]]),
      np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64))
  for labels in label_variants:
    # Class 8 has one label but is never predicted.
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=5, expected=0.0 / 1, class_id=8)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
  """Per-class and overall recall at k=5, for sparse and dense labels."""
  predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                 [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sparse_labels = _binary_2d_label_to_sparse_value(
      [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
  dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

  for labels in (sparse_labels, dense_labels):
    # Class 2: 2 labels, both predicted in the top 5.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=2.0 / 2, class_id=2)
    self._test_sparse_recall_at_top_k(
        labels, top_k_predictions, expected=2.0 / 2, class_id=2)

    # Class 5: 1 label, correctly predicted (expected recall is 1/1).
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=1.0 / 1, class_id=5)
    self._test_sparse_recall_at_top_k(
        labels, top_k_predictions, expected=1.0 / 1, class_id=5)

    # Class 7: 1 label, not predicted (expected recall is 0/1).
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=0.0 / 1, class_id=7)
    self._test_sparse_recall_at_top_k(
        labels, top_k_predictions, expected=0.0 / 1, class_id=7)

    # All classes: 6 labels, 3 of them predicted in the top 5.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=3.0 / 6)
    self._test_sparse_recall_at_top_k(
        labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
  """Tests that labels outside the [0, n_classes) count in denominator."""
  predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                 [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sp_labels = sparse_tensor.SparseTensorValue(
      indices=[[0, 0], [0, 1], [0, 2], [0, 3],
               [1, 0], [1, 1], [1, 2], [1, 3]],
      # values -1 and 10 are outside the [0, n_classes) range.
      values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
      dense_shape=[2, 4])

  # Class 2: 2 labels, both predicted in the top 5.
  self._test_streaming_sparse_recall_at_k(
      predictions=predictions,
      labels=sp_labels,
      k=5,
      expected=2.0 / 2,
      class_id=2)
  self._test_sparse_recall_at_top_k(
      sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)

  # Class 5: 1 label, correctly predicted (expected recall is 1/1).
  self._test_streaming_sparse_recall_at_k(
      predictions=predictions,
      labels=sp_labels,
      k=5,
      expected=1.0 / 1,
      class_id=5)
  self._test_sparse_recall_at_top_k(
      sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)

  # Class 7: 1 label, not predicted (expected recall is 0/1).
  self._test_streaming_sparse_recall_at_k(
      predictions=predictions,
      labels=sp_labels,
      k=5,
      expected=0.0 / 1,
      class_id=7)
  self._test_sparse_recall_at_top_k(
      sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)

  # All classes: 8 labels (including the 2 out-of-range ones, which still
  # count in the denominator), 3 correctly predicted.
  self._test_streaming_sparse_recall_at_k(
      predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
  self._test_sparse_recall_at_top_k(
      sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
  """3-D inputs: recall is NaN for unlabeled / out-of-range classes."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  label_variants = (
      _binary_3d_label_to_sparse_value(
          [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
           [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]]),
      np.array([[[2, 7, 8], [1, 2, 5]],
                [[1, 2, 5], [2, 7, 8]]], dtype=np.int64))
  for labels in label_variants:
    # Classes 0, 3, 4, 6 and 9 carry no labels; class 10 is out of range.
    for class_id in (0, 3, 4, 6, 9, 10):
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=5, expected=NAN, class_id=class_id)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
  """3-D inputs: recall is 0 for labeled classes with no predictions."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  label_variants = (
      _binary_3d_label_to_sparse_value(
          [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
           [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]]),
      np.array([[[2, 7, 8], [1, 2, 5]],
                [[1, 2, 5], [2, 7, 8]]], dtype=np.int64))
  for labels in label_variants:
    # Classes 1 and 8 have labels but no top-5 predictions.
    for class_id in (1, 8):
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=5, expected=0.0, class_id=class_id)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=0.0, class_id=class_id)
def test_3d(self):
  """3-D inputs: per-class and overall recall at k=5."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # (class_id, expected): class 2 has 4/4 labels recovered, class 5 has
  # 2/2, class 7 has 1/2; across all classes 7 of 12 labels are recovered.
  cases = ((2, 4.0 / 4), (5, 2.0 / 2), (7, 1.0 / 2), (None, 7.0 / 12))
  for class_id, expected in cases:
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=5, expected=expected, class_id=class_id)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=expected, class_id=class_id)
def test_3d_ignore_all(self):
  """All-zero weights mask every update, leaving recall NaN."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Both per-batch and per-element zero weight shapes must be honored.
  zero_row = [[0], [0]]
  zero_mat = [[0, 0], [0, 0]]
  for class_id in xrange(10):
    for wts in (zero_row, zero_mat):
      self._test_streaming_sparse_recall_at_k(
          probs, labels, k=5, expected=NAN, class_id=class_id, weights=wts)
      self._test_sparse_recall_at_top_k(
          labels, top_k, expected=NAN, class_id=class_id, weights=wts)
  # Same holds without a class restriction.
  for wts in (zero_row, zero_mat):
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=5, expected=NAN, weights=wts)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=NAN, weights=wts)
def test_3d_ignore_some(self):
  """Partial weights mask a subset of examples from the recall update."""
  probs = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
            [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
           [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
            [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
  top_k = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
           [[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
  labels = _binary_3d_label_to_sparse_value(
      [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
       [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
  # Each case: (class_id, expected recall, weights).
  cases = (
      (2, 2.0 / 2.0, [[1], [0]]),        # batch 0 only: 2 labels, both recovered.
      (2, 2.0 / 2.0, [[0], [1]]),        # batch 1 only: 2 labels, both recovered.
      (7, 1.0 / 1.0, [[0], [1]]),        # batch 1 only: 1 label, recovered.
      (7, 0.0 / 1.0, [[1], [0]]),        # batch 0 only: 1 label, missed.
      (7, 1.0 / 2.0, [[1, 0], [1, 0]]),  # 2 labels kept, 1 recovered.
      (7, NAN, [[0, 1], [0, 1]]),        # kept elements carry no class-7 labels.
  )
  for class_id, expected, wts in cases:
    self._test_streaming_sparse_recall_at_k(
        probs, labels, k=5, expected=expected, class_id=class_id,
        weights=wts)
    self._test_sparse_recall_at_top_k(
        labels, top_k, expected=expected, class_id=class_id, weights=wts)
def test_sparse_tensor_value(self):
  """streaming_sparse_recall_at_k accepts a SparseTensorValue label."""
  probs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  label_rows = [[0, 0, 1, 0], [0, 0, 0, 1]]
  with self.test_session():
    _, recall = metrics.streaming_sparse_recall_at_k(
        predictions=constant_op.constant(probs, dtypes_lib.float32),
        labels=_binary_2d_label_to_sparse_value(label_rows),
        k=1)
    variables.variables_initializer(variables.local_variables()).run()
    # Only the second example's label is recovered by its top-1 prediction.
    self.assertEqual(0.5, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
  """Tests for metrics.streaming_mean_absolute_error."""

  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers its total/count local variables."""
    metrics.streaming_mean_absolute_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))

  def testMetricsCollection(self):
    """The value tensor lands in the requested metrics collection."""
    collection = '__metrics__'
    mean, _ = metrics.streaming_mean_absolute_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [mean])

  def testUpdatesCollection(self):
    """The update op lands in the requested updates collection."""
    collection = '__updates__'
    _, update_op = metrics.streaming_mean_absolute_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly must not change it."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_absolute_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several updates, then check the value is stable under
      # repeated evaluation without further updates.
      for _ in range(10):
        sess.run(update_op)
      baseline = error.eval()
      for _ in range(10):
        self.assertEqual(baseline, error.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted MAE: |4-3|=1 and |8-3|=5 are kept, so the mean is 3."""
    predictions = constant_op.constant(
        [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
    weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.streaming_mean_absolute_error(
        predictions, labels, weights)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(3, sess.run(update_op))
      self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
  """Tests for metrics.streaming_mean_relative_error."""

  def setUp(self):
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))

  def testMetricsCollection(self):
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    collection = '__updates__'
    _, update = metrics.streaming_mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update])

  def testValueTensorIsIdempotent(self):
    preds = random_ops.random_normal((10, 3), seed=1)
    targets = random_ops.random_normal((10, 3), seed=2)
    norm = random_ops.random_normal((10, 3), seed=3)
    error, update_op = metrics.streaming_mean_relative_error(
        preds, targets, norm)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several updates first ...
      for _ in range(10):
        sess.run(update_op)
      # ... then the value tensor must be stable across evaluations.
      first_value = error.eval()
      for _ in range(10):
        self.assertEqual(first_value, error.eval())

  def testSingleUpdateNormalizedByLabels(self):
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
    # Reference value: mean of |p - l| / l over all four elements.
    expected_error = np.mean(np.abs(np_predictions - np_labels) / np_labels)

    predictions = constant_op.constant(
        np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(np_labels, shape=(1, 4))

    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=labels)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(expected_error, sess.run(update_op))
      self.assertEqual(expected_error, error.eval())

  def testSingleUpdateNormalizedByZeros(self):
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)

    predictions = constant_op.constant(
        np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)

    # An all-zero normalizer must yield 0, not a division error.
    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=array_ops.zeros_like(labels))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0.0, sess.run(update_op))
      self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
  """Tests for metrics.streaming_mean_squared_error."""

  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()

  def testVars(self):
    # The metric keeps two local variables: a running total and a count.
    metrics.streaming_mean_squared_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))

  def testMetricsCollection(self):
    # The value tensor is added to the requested metrics collection.
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    # The update op is added to the requested updates collection.
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_squared_error(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency: evaluating the value tensor must not
      # advance the accumulated state.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())

  def testSingleUpdateZeroError(self):
    predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
    labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.streaming_mean_squared_error(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithError(self):
    predictions = constant_op.constant(
        [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.streaming_mean_squared_error(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # MSE = (1^2 + 1^2 + 4^2) / 3 = 6.
      self.assertEqual(6, sess.run(update_op))
      self.assertEqual(6, error.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    predictions = constant_op.constant(
        [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
    weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.streaming_mean_squared_error(
        predictions, labels, weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Only elements 2 and 4 carry weight: (1^2 + 5^2) / 2 = 13.
      self.assertEqual(13, sess.run(update_op))
      self.assertEqual(13, error.eval())

  def testMultipleBatchesOfSizeOne(self):
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()

      error, update_op = metrics.streaming_mean_squared_error(
          predictions, labels)

      sess.run(variables.local_variables_initializer())
      # Each run of update_op dequeues one batch: the first run consumes
      # batch 0, the asserted run consumes batch 1, so the result is the
      # mean over both batches: (122 + 86) / 6.
      sess.run(update_op)
      self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
      self.assertAlmostEqual(208.0 / 6, error.eval(), 5)

  def testMetricsComputedConcurrently(self):
    with self.test_session() as sess:
      # Create the queue that populates one set of predictions.
      preds_queue0 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue0, [10, 8, 6])
      _enqueue_vector(sess, preds_queue0, [-4, 3, -1])
      predictions0 = preds_queue0.dequeue()

      # Create the queue that populates one set of predictions.
      preds_queue1 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue1, [0, 1, 1])
      _enqueue_vector(sess, preds_queue1, [1, 1, 0])
      predictions1 = preds_queue1.dequeue()

      # Create the queue that populates one set of labels.
      labels_queue0 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue0, [1, 3, 2])
      _enqueue_vector(sess, labels_queue0, [2, 4, 6])
      labels0 = labels_queue0.dequeue()

      # Create the queue that populates another set of labels.
      labels_queue1 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue1, [-5, -3, -1])
      _enqueue_vector(sess, labels_queue1, [5, 4, 3])
      labels1 = labels_queue1.dequeue()

      # Distinct names give each metric its own local variables, so the
      # two accumulations do not interfere.
      mse0, update_op0 = metrics.streaming_mean_squared_error(
          predictions0, labels0, name='msd0')
      mse1, update_op1 = metrics.streaming_mean_squared_error(
          predictions1, labels1, name='msd1')

      sess.run(variables.local_variables_initializer())
      sess.run([update_op0, update_op1])
      sess.run([update_op0, update_op1])

      mse0, mse1 = sess.run([mse0, mse1])
      # mse0 = (122 + 86) / 6; mse1 = (45 + 34) / 6.
      self.assertAlmostEqual(208.0 / 6, mse0, 5)
      self.assertAlmostEqual(79.0 / 6, mse1, 5)

  def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()

      # Two different metrics share the same dequeued input tensors.
      mae, ma_update_op = metrics.streaming_mean_absolute_error(
          predictions, labels)
      mse, ms_update_op = metrics.streaming_mean_squared_error(
          predictions, labels)

      sess.run(variables.local_variables_initializer())
      sess.run([ma_update_op, ms_update_op])
      sess.run([ma_update_op, ms_update_op])

      # MAE = (9+5+4 + 6+1+7) / 6; MSE = (122 + 86) / 6.
      self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
      self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
  """Tests for metrics.streaming_root_mean_squared_error."""

  def setUp(self):
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_root_mean_squared_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_metric_variables(
        self,
        ('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))

  def testMetricsCollection(self):
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_root_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    collection = '__updates__'
    _, update = metrics.streaming_root_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update])

  def testValueTensorIsIdempotent(self):
    preds = random_ops.random_normal((10, 3), seed=1)
    targets = random_ops.random_normal((10, 3), seed=2)
    rmse, update_op = metrics.streaming_root_mean_squared_error(
        preds, targets)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several updates first ...
      for _ in range(10):
        sess.run(update_op)
      # ... then re-evaluating the value tensor must not change the result.
      first_value = rmse.eval()
      for _ in range(10):
        self.assertEqual(first_value, rmse.eval())

  def testSingleUpdateZeroError(self):
    with self.test_session() as sess:
      preds = constant_op.constant(
          0.0, shape=(1, 3), dtype=dtypes_lib.float32)
      targets = constant_op.constant(0.0, shape=(1, 3),
                                     dtype=dtypes_lib.float32)
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          preds, targets)
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, rmse.eval())

  def testSingleUpdateWithError(self):
    with self.test_session() as sess:
      preds = constant_op.constant(
          [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
      targets = constant_op.constant(
          [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          preds, targets)
      sess.run(variables.local_variables_initializer())
      # RMSE = sqrt((1 + 1 + 16) / 3) = sqrt(6).
      self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
      self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)

  def testSingleUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      preds = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      targets = constant_op.constant(
          [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
      weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          preds, targets, weights)
      sess.run(variables.local_variables_initializer())
      # Weighted RMSE = sqrt((1 + 25) / 2) = sqrt(13).
      self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
      self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
  """Tests for metrics.streaming_covariance."""

  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()

  def testVars(self):
    # The streaming covariance keeps four local variables: the comoment,
    # a count, and the running means of labels and predictions.
    metrics.streaming_covariance(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
    _assert_metric_variables(self, (
        'covariance/comoment:0',
        'covariance/count:0',
        'covariance/mean_label:0',
        'covariance/mean_prediction:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    cov, _ = metrics.streaming_covariance(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [cov])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_covariance(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    labels = random_ops.random_normal((10, 3), seed=2)
    predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
    cov, update_op = metrics.streaming_covariance(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency: evaluating the value tensor must not
      # advance the accumulated state.
      initial_cov = cov.eval()
      for _ in range(10):
        self.assertEqual(initial_cov, cov.eval())

  def testSingleUpdateIdentical(self):
    with self.test_session() as sess:
      predictions = math_ops.to_float(math_ops.range(10))
      labels = math_ops.to_float(math_ops.range(10))
      cov, update_op = metrics.streaming_covariance(predictions, labels)

      # Reference value comes from numpy's sample covariance matrix.
      expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
      self.assertAlmostEqual(expected_cov, cov.eval(), 5)

  def testSingleUpdateNonIdentical(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
      labels = constant_op.constant(
          [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
      cov, update_op = metrics.streaming_covariance(predictions, labels)

      expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, update_op.eval())
      self.assertAlmostEqual(expected_cov, cov.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant(
          [1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
      weights = constant_op.constant(
          [0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
      cov, update_op = metrics.streaming_covariance(
          predictions, labels, weights=weights)

      # The metric's weights correspond to numpy's integer frequency
      # weights (fweights), so element 0 is excluded entirely.
      expected_cov = np.cov(
          [2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, sess.run(update_op))
      self.assertAlmostEqual(expected_cov, cov.eval())

  def testMultiUpdateWithErrorNoWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)

      stride = 10
      predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])

      cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)

      sess.run(variables.local_variables_initializer())
      prev_expected_cov = NAN
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)]
        }
        # Before the first update the value is NaN; afterwards, reading
        # the value tensor must reproduce the previous step's result.
        self.assertEqual(
            np.isnan(prev_expected_cov),
            np.isnan(sess.run(cov, feed_dict=feed_dict)))
        if not np.isnan(prev_expected_cov):
          self.assertAlmostEqual(prev_expected_cov,
                                 sess.run(cov, feed_dict=feed_dict), 5)
        # After the update, the streaming value must match numpy's
        # covariance over all samples seen so far.
        expected_cov = np.cov(predictions[:stride * (i + 1)],
                              labels[:stride * (i + 1)])[0, 1]
        self.assertAlmostEqual(expected_cov,
                               sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
                               5)
        prev_expected_cov = expected_cov

  def testMultiUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Integer weights (0 .. n//10 - 1, shuffled), so some samples carry
      # zero weight.
      weights = np.tile(np.arange(n // 10), n // 10)
      np.random.shuffle(weights)

      stride = 10
      predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])

      cov, update_op = metrics.streaming_covariance(
          predictions_t, labels_t, weights=weights_t)

      sess.run(variables.local_variables_initializer())
      prev_expected_cov = NAN
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)],
            weights_t: weights[stride * i:stride * (i + 1)]
        }
        # NaN before the first update; stable between updates.
        self.assertEqual(
            np.isnan(prev_expected_cov),
            np.isnan(sess.run(cov, feed_dict=feed_dict)))
        if not np.isnan(prev_expected_cov):
          self.assertAlmostEqual(prev_expected_cov,
                                 sess.run(cov, feed_dict=feed_dict), 5)
        # Compare against numpy's frequency-weighted covariance over all
        # samples seen so far.
        expected_cov = np.cov(
            predictions[:stride * (i + 1)],
            labels[:stride * (i + 1)],
            fweights=weights[:stride * (i + 1)])[0, 1]
        self.assertAlmostEqual(expected_cov,
                               sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
                               5)
        prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
  """Tests for metrics.streaming_pearson_correlation."""

  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()

  def testVars(self):
    # Pearson r is built from three streaming covariance computations
    # (cov(p, l), var(l), var(p)), each with its own four variables.
    metrics.streaming_pearson_correlation(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
    _assert_metric_variables(self, (
        'pearson_r/covariance/comoment:0',
        'pearson_r/covariance/count:0',
        'pearson_r/covariance/mean_label:0',
        'pearson_r/covariance/mean_prediction:0',
        'pearson_r/variance_labels/count:0',
        'pearson_r/variance_labels/comoment:0',
        'pearson_r/variance_labels/mean_label:0',
        'pearson_r/variance_labels/mean_prediction:0',
        'pearson_r/variance_predictions/comoment:0',
        'pearson_r/variance_predictions/count:0',
        'pearson_r/variance_predictions/mean_label:0',
        'pearson_r/variance_predictions/mean_prediction:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    pearson_r, _ = metrics.streaming_pearson_correlation(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_pearson_correlation(
        predictions=math_ops.to_float(math_ops.range(10)) +
        array_ops.ones([10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    labels = random_ops.random_normal((10, 3), seed=2)
    predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
    pearson_r, update_op = metrics.streaming_pearson_correlation(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency: evaluating the value tensor must not
      # advance the accumulated state.
      initial_r = pearson_r.eval()
      for _ in range(10):
        self.assertEqual(initial_r, pearson_r.eval())

  def testSingleUpdateIdentical(self):
    with self.test_session() as sess:
      predictions = math_ops.to_float(math_ops.range(10))
      labels = math_ops.to_float(math_ops.range(10))
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions, labels)

      # Identical series => correlation 1, via numpy's corrcoef.
      expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
      self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)

  def testSingleUpdateNonIdentical(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
      labels = constant_op.constant(
          [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions, labels)

      expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_r, update_op.eval())
      self.assertAlmostEqual(expected_r, pearson_r.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      predictions = np.array([2, 4, 6, 8])
      labels = np.array([1, 3, 2, 7])
      weights = np.array([0, 1, 3, 1])
      predictions_t = constant_op.constant(
          predictions, shape=(1, 4), dtype=dtypes_lib.float32)
      labels_t = constant_op.constant(
          labels, shape=(1, 4), dtype=dtypes_lib.float32)
      weights_t = constant_op.constant(
          weights, shape=(1, 4), dtype=dtypes_lib.float32)
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t, weights=weights_t)

      # Reference: r = cov(p, l) / sqrt(var(p) * var(l)) from the
      # frequency-weighted covariance matrix.
      cmat = np.cov(predictions, labels, fweights=weights)
      expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expected_r, sess.run(update_op))
      self.assertAlmostEqual(expected_r, pearson_r.eval())

  def testMultiUpdateWithErrorNoWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)

      stride = 10
      predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])

      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t)

      sess.run(variables.local_variables_initializer())
      prev_expected_r = NAN
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)]
        }
        # NaN before the first update; reading the value tensor between
        # updates must reproduce the previous step's result.
        self.assertEqual(
            np.isnan(prev_expected_r),
            np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
        if not np.isnan(prev_expected_r):
          self.assertAlmostEqual(prev_expected_r,
                                 sess.run(pearson_r, feed_dict=feed_dict), 5)
        # After the update, compare against numpy over all samples so far.
        expected_r = np.corrcoef(predictions[:stride * (i + 1)],
                                 labels[:stride * (i + 1)])[0, 1]
        self.assertAlmostEqual(expected_r,
                               sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(expected_r,
                               sess.run(pearson_r, feed_dict=feed_dict), 5)
        prev_expected_r = expected_r

  def testMultiUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Integer weights (0 .. n//10 - 1, shuffled), so some samples carry
      # zero weight.
      weights = np.tile(np.arange(n // 10), n // 10)
      np.random.shuffle(weights)

      stride = 10
      predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])

      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t, weights=weights_t)

      sess.run(variables.local_variables_initializer())
      prev_expected_r = NAN
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)],
            weights_t: weights[stride * i:stride * (i + 1)]
        }
        # NaN before the first update; stable between updates.
        self.assertEqual(
            np.isnan(prev_expected_r),
            np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
        if not np.isnan(prev_expected_r):
          self.assertAlmostEqual(prev_expected_r,
                                 sess.run(pearson_r, feed_dict=feed_dict), 5)
        # Reference r from the frequency-weighted covariance matrix over
        # all samples seen so far.
        cmat = np.cov(
            predictions[:stride * (i + 1)],
            labels[:stride * (i + 1)],
            fweights=weights[:stride * (i + 1)])
        expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
        self.assertAlmostEqual(expected_r,
                               sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(expected_r,
                               sess.run(pearson_r, feed_dict=feed_dict), 5)
        prev_expected_r = expected_r

  def testMultiUpdateWithErrorAndSingletonBatches(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      stride = 10
      weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
      for row in weights:
        np.random.shuffle(row)
      # Now, weights is one-hot by row - one item per batch has non-zero weight.
      weights = weights.reshape((n,))

      predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
      weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])

      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t, weights=weights_t)

      sess.run(variables.local_variables_initializer())
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)],
            weights_t: weights[stride * i:stride * (i + 1)]
        }
        # With one effective sample per batch the reference value may be
        # NaN (e.g. zero variance); the metric must agree on NaN-ness.
        cmat = np.cov(
            predictions[:stride * (i + 1)],
            labels[:stride * (i + 1)],
            fweights=weights[:stride * (i + 1)])
        expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
        actual_r = sess.run(update_op, feed_dict=feed_dict)
        self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
        self.assertEqual(
            np.isnan(expected_r),
            np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
        if not np.isnan(expected_r):
          self.assertAlmostEqual(expected_r, actual_r, 5)
          self.assertAlmostEqual(expected_r,
                                 sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
  """Tests for metrics.streaming_mean_cosine_distance."""

  def setUp(self):
    # Each test builds its own graph from scratch.
    ops.reset_default_graph()

  def testVars(self):
    # The metric keeps two local variables: a running total and a count.
    metrics.streaming_mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1)
    _assert_metric_variables(self, (
        'mean_cosine_distance/count:0',
        'mean_cosine_distance/total:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=1)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency: evaluating the value tensor must not
      # advance the accumulated state.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())

  def testSingleUpdateZeroError(self):
    # Identical unit vectors along dim=2 => distance 0 for every row.
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))

    predictions = constant_op.constant(
        np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)

    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithError1(self):
    # Row-wise distances (1 - cosine similarity): 0, 2, 1 => mean 1.
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))

    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)

    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 5)
      self.assertAlmostEqual(1, error.eval(), 5)

  def testSingleUpdateWithError2(self):
    # Unit vectors: row 1 identical, rows 2 and 3 differ; the distances
    # average to 1.0.
    np_predictions = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
         '0.707106781186548 -0.707106781186548 0'))
    np_labels = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '0.665139432070255 0.739487441769973 0.103671883216994;'
         '0.707106781186548 0.707106781186548 0'))

    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.0, error.eval(), 5)

  def testSingleUpdateWithErrorAndWeights1(self):
    # Only row 1 carries weight, and that row matches => distance 0.
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))

    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    weights = constant_op.constant(
        [1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)

    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2, weights=weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithErrorAndWeights2(self):
    # Rows 2 and 3 carry weight, with distances 2 and 1 => mean 1.5.
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))

    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    weights = constant_op.constant(
        [0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)

    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2, weights=weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.5, update_op.eval())
      self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
  """Tests for metrics.streaming_percentage_less."""

  def setUp(self):
    ops.reset_default_graph()

  def testVars(self):
    metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
    _assert_metric_variables(self, (
        'percentage_below_threshold/count:0',
        'percentage_below_threshold/total:0',
    ))

  def testMetricsCollection(self):
    collection = '__metrics__'
    value_tensor, _ = metrics.streaming_percentage_less(
        values=array_ops.ones((10,)),
        threshold=2,
        metrics_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [value_tensor])

  def testUpdatesCollection(self):
    collection = '__updates__'
    _, update = metrics.streaming_percentage_less(
        values=array_ops.ones((10,)),
        threshold=2,
        updates_collections=[collection])
    self.assertListEqual(ops.get_collection(collection), [update])

  def testOneUpdate(self):
    with self.test_session() as sess:
      values = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)

      # Three thresholds: all, some, and none of the values fall below.
      pcnt_high, update_high = metrics.streaming_percentage_less(
          values, 100, name='high')
      pcnt_med, update_med = metrics.streaming_percentage_less(
          values, 7, name='medium')
      pcnt_low, update_low = metrics.streaming_percentage_less(
          values, 1, name='low')

      sess.run(variables.local_variables_initializer())
      sess.run([update_high, update_med, update_low])

      high, med, low = sess.run([pcnt_high, pcnt_med, pcnt_low])
      self.assertAlmostEqual(1.0, high, 5)
      self.assertAlmostEqual(0.75, med, 5)
      self.assertAlmostEqual(0.0, low, 5)

  def testSomePresentOneUpdate(self):
    with self.test_session() as sess:
      values = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      # Only the first and last values carry weight.
      weights = constant_op.constant(
          [1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)

      pcnt_high, update_high = metrics.streaming_percentage_less(
          values, 100, weights=weights, name='high')
      pcnt_med, update_med = metrics.streaming_percentage_less(
          values, 7, weights=weights, name='medium')
      pcnt_low, update_low = metrics.streaming_percentage_less(
          values, 1, weights=weights, name='low')

      sess.run(variables.local_variables_initializer())
      self.assertListEqual([1.0, 0.5, 0.0],
                           sess.run([update_high, update_med, update_low]))

      high, med, low = sess.run([pcnt_high, pcnt_med, pcnt_low])
      self.assertAlmostEqual(1.0, high, 5)
      self.assertAlmostEqual(0.5, med, 5)
      self.assertAlmostEqual(0.0, low, 5)
class StreamingMeanIOUTest(test.TestCase):
  """Tests for metrics.streaming_mean_iou.

  The update op accumulates a confusion matrix; the value tensor is the mean
  over classes of intersection / union computed from that matrix.
  """

  def setUp(self):
    # Fixed seed keeps random_uniform-based tests reproducible.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    # The metric should create exactly one local variable: the accumulated
    # confusion matrix.
    metrics.streaming_mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2)
    _assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))

  def testMetricsCollections(self):
    my_collection_name = '__metrics__'
    mean_iou, _ = metrics.streaming_mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    # Shape mismatch between predictions and labels is a graph-build error.
    predictions = array_ops.ones([10, 3])
    labels = array_ops.ones([10, 4])
    with self.assertRaises(ValueError):
      metrics.streaming_mean_iou(predictions, labels, num_classes=2)

  def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
    predictions = array_ops.ones([10])
    labels = array_ops.ones([10])
    weights = array_ops.zeros([9])
    with self.assertRaises(ValueError):
      metrics.streaming_mean_iou(
          predictions, labels, num_classes=2, weights=weights)

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor must not mutate the accumulator.
    num_classes = 3
    predictions = random_ops.random_uniform(
        [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
    miou, update_op = metrics.streaming_mean_iou(
        predictions, labels, num_classes=num_classes)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_miou = miou.eval()
      for _ in range(10):
        self.assertEqual(initial_miou, miou.eval())

  def testMultipleUpdates(self):
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      # NOTE: enqueue order pairs element-wise with the labels queue below.
      preds_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # Per-class IoU: class 0 -> 1/2, class 1 -> 1/4, class 2 -> 0.
      desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
      self.assertEqual(desired_output, miou.eval())

  def testMultipleUpdatesWithWeights(self):
    num_classes = 2
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      # Create the queue that populates the weights.
      # Zero weights drop samples 4 and 6 from the confusion matrix.
      weights_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      weights = weights_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes, weights=weights)
      sess.run(variables.local_variables_initializer())
      for _ in range(6):
        sess.run(update_op)
      desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
      self.assertAlmostEqual(desired_output, miou.eval())

  def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
    # one class, and thus there is one row and one column with
    # zero entries in the confusion matrix.
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      # There is no prediction for class 2.
      preds_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      # There is no label for class 2.
      labels_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # Class 2 never appears, so only classes 0 and 1 enter the mean.
      desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
      self.assertAlmostEqual(desired_output, miou.eval())

  def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
    predictions = array_ops.concat([
        constant_op.constant(0, shape=[5]),
        constant_op.constant(1, shape=[5])
    ], 0)
    labels = array_ops.concat([
        constant_op.constant(0, shape=[3]),
        constant_op.constant(1, shape=[7])
    ], 0)
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      # The update op's value is the accumulated confusion matrix itself.
      confusion_matrix = update_op.eval()
      self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
      desired_miou = np.mean([3. / 5., 5. / 7.])
      self.assertAlmostEqual(desired_miou, miou.eval())

  def testAllCorrect(self):
    predictions = array_ops.zeros([40])
    labels = array_ops.zeros([40])
    num_classes = 1
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertEqual(40, update_op.eval()[0])
      self.assertEqual(1.0, miou.eval())

  def testAllWrong(self):
    predictions = array_ops.zeros([40])
    labels = array_ops.ones([40])
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
      self.assertEqual(0., miou.eval())

  def testResultsWithSomeMissing(self):
    predictions = array_ops.concat([
        constant_op.constant(0, shape=[5]),
        constant_op.constant(1, shape=[5])
    ], 0)
    labels = array_ops.concat([
        constant_op.constant(0, shape=[3]),
        constant_op.constant(1, shape=[7])
    ], 0)
    num_classes = 2
    # First and last samples are masked out by zero weight.
    weights = array_ops.concat([
        constant_op.constant(0, shape=[1]),
        constant_op.constant(1, shape=[8]),
        constant_op.constant(0, shape=[1])
    ], 0)
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes, weights=weights)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
      desired_miou = np.mean([2. / 4., 4. / 6.])
      self.assertAlmostEqual(desired_miou, miou.eval())

  def testMissingClassInLabels(self):
    # Class 2 occurs only in predictions, never in labels.
    labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
                                   [[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
    predictions = constant_op.constant(
        [[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
                                                    [1, 1, 2, 0, 0, 0]]])
    num_classes = 3
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
      # Class 2 still contributes a 0 term because it was predicted.
      self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
                                      (0 + 5 + 0)), miou.eval())

  def testMissingClassOverallSmall(self):
    # Class 1 appears in neither labels nor predictions and is excluded
    # from the mean entirely.
    labels = constant_op.constant([0])
    predictions = constant_op.constant([0])
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
      self.assertAlmostEqual(1, miou.eval())

  def testMissingClassOverallLarge(self):
    labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
                                   [[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
    predictions = constant_op.constant(
        [[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
                                                    [1, 1, 1, 0, 0, 0]]])
    num_classes = 3
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(predictions, labels,
                                                   num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
      # Class 2 is entirely absent, so the mean is over classes 0 and 1 only.
      self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
                             miou.eval())
class StreamingConcatTest(test.TestCase):
  """Tests for metrics.streaming_concat, which accumulates values across
  update-op evaluations into a growing array."""

  def setUp(self):
    ops.reset_default_graph()

  def testVars(self):
    # The metric keeps a backing array plus a size counter.
    metrics.streaming_concat(values=array_ops.ones((10,)))
    _assert_metric_variables(self, (
        'streaming_concat/array:0',
        'streaming_concat/size:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    value, _ = metrics.streaming_concat(
        values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [value])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_concat(
        values=array_ops.ones((10,)), updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testNextArraySize(self):
    # Backing array grows geometrically: next size is the smallest power of
    # growth_factor that fits the requested size.
    next_array_size = metric_ops._next_array_size  # pylint: disable=protected-access
    with self.test_session():
      self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
      self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
      self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
      self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
      self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)

  def testStreamingConcat(self):
    with self.test_session() as sess:
      values = array_ops.placeholder(dtypes_lib.int32, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([], concatenated.eval())
      # Each update appends the fed batch to the accumulated result.
      sess.run([update_op], feed_dict={values: [0, 1, 2]})
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      sess.run([update_op], feed_dict={values: [3, 4]})
      self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
      sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
      self.assertAllEqual(np.arange(10), concatenated.eval())

  def testStreamingConcatStringValues(self):
    with self.test_session() as sess:
      values = array_ops.placeholder(dtypes_lib.string, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(variables.local_variables_initializer())
      self.assertItemsEqual([], concatenated.eval())
      # String tensors come back as bytes, hence the b'' expectations.
      sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
      self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
      sess.run([update_op], feed_dict={values: ['d', 'e']})
      self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
      sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
      self.assertItemsEqual(
          [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
          concatenated.eval())

  def testStreamingConcatMaxSize(self):
    with self.test_session() as sess:
      values = math_ops.range(3)
      concatenated, update_op = metrics.streaming_concat(values, max_size=5)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([], concatenated.eval())
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      # Second update is truncated at max_size=5; later ones are no-ops.
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())

  def testStreamingConcat2D(self):
    with self.test_session() as sess:
      values = array_ops.reshape(math_ops.range(3), (3, 1))
      # axis=-1 concatenates along columns rather than rows.
      concatenated, update_op = metrics.streaming_concat(values, axis=-1)
      sess.run(variables.local_variables_initializer())
      for _ in range(10):
        sess.run([update_op])
      self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())

  def testStreamingConcatErrors(self):
    # Fully unknown shape is rejected.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
    values = array_ops.zeros((2, 3))
    # Axis out of range in either direction is rejected.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(values, axis=-3, max_size=3)
    with self.assertRaises(ValueError):
      metrics.streaming_concat(values, axis=2, max_size=3)
    # Unknown size along the concat axis is rejected.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(
          array_ops.placeholder(dtypes_lib.float32, [None, None]))

  def testStreamingConcatReset(self):
    with self.test_session() as sess:
      values = array_ops.placeholder(dtypes_lib.int32, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([], concatenated.eval())
      sess.run([update_op], feed_dict={values: [0, 1, 2]})
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      # Re-running the initializer clears the accumulated array.
      sess.run(variables.local_variables_initializer())
      sess.run([update_op], feed_dict={values: [3, 4]})
      self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
  """Tests for metrics.aggregate_metrics, which bundles (value, update)
  pairs into parallel lists."""

  def testAggregateNoMetricsRaisesValueError(self):
    with self.assertRaises(ValueError):
      metrics.aggregate_metrics()

  def testAggregateSingleMetricReturnsOneItemLists(self):
    inputs = array_ops.ones((10, 4))
    value_tensors, update_ops = metrics.aggregate_metrics(
        metrics.streaming_mean(inputs))
    self.assertEqual(len(value_tensors), 1)
    self.assertEqual(len(update_ops), 1)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Mean of all-ones input is 1, both from the update op and the value.
      self.assertEqual(1, update_ops[0].eval())
      self.assertEqual(1, value_tensors[0].eval())

  def testAggregateMultipleMetricsReturnsListsInOrder(self):
    predictions = array_ops.ones((10, 4))
    labels = array_ops.ones((10, 4)) * 3
    value_tensors, update_ops = metrics.aggregate_metrics(
        metrics.streaming_mean_absolute_error(predictions, labels),
        metrics.streaming_mean_squared_error(predictions, labels))
    self.assertEqual(len(value_tensors), 2)
    self.assertEqual(len(update_ops), 2)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # MAE of |1 - 3| = 2; MSE of (1 - 3)^2 = 4 — order must match inputs.
      for expected, update, value_tensor in zip(
          [2, 4], update_ops, value_tensors):
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, value_tensor.eval())
class AggregateMetricMapTest(test.TestCase):
  """Tests for metrics.aggregate_metric_map, the dict-keyed variant of
  aggregate_metrics."""

  def testAggregateMultipleMetricsReturnsListsInOrder(self):
    predictions = array_ops.ones((10, 4))
    labels = array_ops.ones((10, 4)) * 3
    names_to_values, names_to_updates = metrics.aggregate_metric_map({
        'm1': metrics.streaming_mean_absolute_error(predictions, labels),
        'm2': metrics.streaming_mean_squared_error(predictions, labels),
    })
    self.assertEqual(2, len(names_to_values))
    self.assertEqual(2, len(names_to_updates))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # MAE of |1 - 3| = 2 under 'm1'; MSE of (1 - 3)^2 = 4 under 'm2'.
      expected = {'m1': 2, 'm2': 4}
      for name in expected:
        self.assertEqual(expected[name], names_to_updates[name].eval())
      for name in expected:
        self.assertEqual(expected[name], names_to_values[name].eval())
class CountTest(test.TestCase):
  """Tests for metrics.count, which counts (optionally weighted) elements
  seen across update-op evaluations."""

  def setUp(self):
    ops.reset_default_graph()

  def testVars(self):
    metrics.count(array_ops.ones([4, 3]))
    _assert_metric_variables(self, ['count/count:0'])

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.count(
        array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.count(
        array_ops.ones([4, 3]), updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testBasic(self):
    with self.test_session() as sess:
      # Each dequeue yields a (1, 2) batch; 4 batches of 2 elements = 8.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      result, update_op = metrics.count(values)
      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAlmostEqual(8.0, sess.run(result), 5)

  def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      result, update_op = metrics.count(values)
      sess.run(variables.local_variables_initializer())
      # The update op returns the running count after each batch.
      self.assertAlmostEqual(2.0, sess.run(update_op), 5)
      self.assertAlmostEqual(4.0, sess.run(update_op), 5)
      self.assertAlmostEqual(6.0, sess.run(update_op), 5)
      self.assertAlmostEqual(8.0, sess.run(update_op), 5)
      self.assertAlmostEqual(8.0, sess.run(result), 5)

  def test1dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weighted labels.
      # A scalar weight per batch scales both elements of that batch:
      # (0.5 + 0 + 0 + 1.2) * 2 elements = 3.4.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [0.5])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [1.2])
      weights = weights_queue.dequeue()
      result, update_op = metrics.count(values, weights)
      variables.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      self.assertAlmostEqual(3.4, result.eval(), 5)

  def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1,))
      _enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
      weights = weights_queue.dequeue()
      result, update_op = metrics.count(values, weights)
      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual(3.4, result.eval(), 5)

  def test2dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # Create the queue that populates the weighted labels.
      # Per-element weights: sum of all weights = 1.1+1+1+0+0+1+0+0 = 4.1.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1.1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()
      result, update_op = metrics.count(values, weights)
      variables.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      self.assertAlmostEqual(4.1, result.eval(), 5)

  def test2dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(2,))
      _enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
      weights = weights_queue.dequeue()
      result, update_op = metrics.count(values, weights)
      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
  """Tests for metrics.cohen_kappa (inter-rater agreement)."""

  def _confusion_matrix_to_samples(self, confusion_matrix):
    """Expand an integer confusion matrix into (labels, predictions) samples.

    Cell (i, j) with count c contributes c samples with label i and
    prediction j; returns two parallel 1-d arrays.
    """
    x, y = confusion_matrix.shape
    pairs = []
    for label in range(x):
      for feature in range(y):
        pairs += [label, feature] * confusion_matrix[label, feature]
    pairs = np.array(pairs).reshape((-1, 2))
    return pairs[:, 0], pairs[:, 1]

  def setUp(self):
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    # The metric accumulates observed agreement plus per-rater marginals.
    metrics.cohen_kappa(
        predictions_idx=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_classes=2)
    _assert_metric_variables(self, (
        'cohen_kappa/po:0',
        'cohen_kappa/pe_row:0',
        'cohen_kappa/pe_col:0',
    ))

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    kappa, _ = metrics.cohen_kappa(
        predictions_idx=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_classes=2,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [kappa])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.cohen_kappa(
        predictions_idx=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        num_classes=2,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor must not change the accumulated state.
    predictions = random_ops.random_uniform(
        (10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
    kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_kappa = kappa.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)

  def testBasic(self):
    confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
    # overall total = 36
    # po = [9, 8, 6], sum(po) = 23
    # pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
    # finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
    #                = (23 - 12.34) / (36 - 12.34)
    #                = 0.45
    # see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
    expect = 0.45
    labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
    # Check the metric across input dtypes, ranks, and explicit unit weights.
    dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
    shapes = [
        (len(labels,)),  # 1-dim
        (len(labels), 1)
    ]  # 2-dim
    weights = [None, np.ones_like(labels)]
    for dtype in dtypes:
      for shape in shapes:
        for weight in weights:
          with self.test_session() as sess:
            predictions_tensor = constant_op.constant(
                np.reshape(predictions, shape), dtype=dtype)
            labels_tensor = constant_op.constant(
                np.reshape(labels, shape), dtype=dtype)
            kappa, update_op = metrics.cohen_kappa(
                labels_tensor, predictions_tensor, 3, weights=weight)
            sess.run(variables.local_variables_initializer())
            self.assertAlmostEqual(expect, sess.run(update_op), 2)
            self.assertAlmostEqual(expect, kappa.eval(), 2)

  def testAllCorrect(self):
    inputs = np.arange(0, 100) % 4
    # confusion matrix
    # [[25, 0, 0],
    #  [0, 25, 0],
    #  [0, 0, 25]]
    # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
    expect = 1.0
    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expect, sess.run(update_op), 5)
      self.assertAlmostEqual(expect, kappa.eval(), 5)

  def testAllIncorrect(self):
    labels = np.arange(0, 100) % 4
    predictions = (labels + 1) % 4
    # confusion matrix
    # [[0, 25, 0],
    #  [0, 0, 25],
    #  [25, 0, 0]]
    # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
    expect = -0.333333333333
    with self.test_session() as sess:
      predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
      labels = constant_op.constant(labels)
      kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expect, sess.run(update_op), 5)
      self.assertAlmostEqual(expect, kappa.eval(), 5)

  def testWeighted(self):
    confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
    labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
    num_samples = np.sum(confusion_matrix, dtype=np.int32)
    weights = (np.arange(0, num_samples) % 5) / 5.0
    # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
    #                          labels, predictions, sample_weight=weights)
    expect = 0.453466583385
    with self.test_session() as sess:
      predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
      labels = constant_op.constant(labels)
      kappa, update_op = metrics.cohen_kappa(
          labels, predictions, 4, weights=weights)
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(expect, sess.run(update_op), 5)
      self.assertAlmostEqual(expect, kappa.eval(), 5)

  def testWithMultipleUpdates(self):
    # Feed the samples in batches and check the final accumulated kappa
    # matches the single-shot sklearn computation.
    confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
                                 [20, 10, 60, 35], [15, 25, 30, 25]])
    labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
    num_samples = np.sum(confusion_matrix, dtype=np.int32)
    weights = (np.arange(0, num_samples) % 5) / 5.0
    num_classes = confusion_matrix.shape[0]
    batch_size = num_samples // 10
    predictions_t = array_ops.placeholder(
        dtypes_lib.float32, shape=(batch_size,))
    labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
    weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
    kappa, update_op = metrics.cohen_kappa(
        labels_t, predictions_t, num_classes, weights=weights_t)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      for idx in range(0, num_samples, batch_size):
        batch_start, batch_end = idx, idx + batch_size
        sess.run(
            update_op,
            feed_dict={
                labels_t: labels[batch_start:batch_end],
                predictions_t: predictions[batch_start:batch_end],
                weights_t: weights[batch_start:batch_end]
            })
      # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
      #                          labels_np, predictions_np, sample_weight=weights_np)
      expect = 0.289965397924
      self.assertAlmostEqual(expect, kappa.eval(), 5)

  def testInvalidNumClasses(self):
    # Kappa needs at least two classes.
    predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
    labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
    with self.assertRaisesRegexp(ValueError, 'num_classes'):
      metrics.cohen_kappa(labels, predictions, 1)

  def testInvalidDimension(self):
    # Both inputs must have a trailing dimension of 1.
    predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
    invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
    with self.assertRaises(ValueError):
      metrics.cohen_kappa(invalid_labels, predictions, 3)
    invalid_predictions = array_ops.placeholder(
        dtypes_lib.float32, shape=(4, 2))
    labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
    with self.assertRaises(ValueError):
      metrics.cohen_kappa(labels, invalid_predictions, 3)
# Allow running this test file directly with the TF test runner.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
strongh/GPy | GPy/util/datasets.py | 4 | 64376 | import csv
import os
import copy
import numpy as np
import GPy
import scipy.io
import cPickle as pickle
import zipfile
import tarfile
import datetime
import json
import re
from config import *
# Optional IPython support: record whether IPython can be imported so
# callers can fall back gracefully when it is absent.
ipython_available=True
try:
    import IPython
except ImportError:
    ipython_available=False
import sys, urllib2
def reporthook(a,b,c):
    """Console progress callback for urllib-style downloads.

    Arguments follow the urlretrieve reporthook convention: a is the number
    of blocks transferred so far, b the block size in bytes, and c the total
    file size in bytes. Rewrites the current console line in place.
    """
    percent = min(100, float(a * b) / c * 100)
    # Leading '\r' returns the cursor to the line start so every call
    # overwrites the previous progress report instead of appending.
    sys.stdout.write("\r% 3.1f%% of %d bytes" % (percent, c))
    sys.stdout.flush()
# Global variables
# data_path: on-disk cache directory for downloaded data sets, read from the
# GPy config file ('datasets' section, 'dir' option); env vars are expanded.
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
# Seed used by dataset loaders when the caller does not supply one.
default_seed = 10000
# When True, the interactive license prompt is skipped. NOTE: the historical
# misspelling is kept because user-facing messages below reference it by name.
overide_manual_authorize=False
# Mirror for data sets hosted by N. Lawrence's group.
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
    path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
    json_data=open(path).read()
    data_resources = json.loads(json_data)
if not (on_rtd):
    path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
    json_data=open(path).read()
    football_dict = json.loads(json_data)
def prompt_user(prompt):
    """Ask user for agreeing to data set licenses.

    Prompts on stdin and returns True for 'yes'/'y', False for 'no'/'n'.
    An unrecognized answer re-prompts instead of silently returning None
    (which callers would treat as refusal). Raises if stdin is unavailable
    (e.g. in some notebook front ends), after printing how to bypass the
    prompt via overide_manual_authorize.
    """
    # raw_input returns the empty string for "enter"
    yes = set(['yes', 'y'])
    no = set(['no','n'])
    while True:
        try:
            print(prompt)
            choice = raw_input().lower()
        # would like to test for exception here, but not sure if we can do that without importing IPython
        except:
            print('Stdin is not implemented.')
            print('You need to set')
            print('overide_manual_authorize=True')
            print('to proceed with the download. Please set that variable and continue.')
            raise
        if choice in yes:
            return True
        elif choice in no:
            return False
        # Previously this fell through and implicitly returned None; loop
        # back and ask again until we get a recognizable answer.
        print("Your response was a " + choice)
        print("Please respond with 'yes', 'y' or 'no', 'n'")
def data_available(dataset_name=None):
    """Check if the data set is available on the local machine already.

    Returns True only when every expected file (the declared file name, or
    its 'save_names' override when given) exists under
    data_path/dataset_name.
    """
    from itertools import izip_longest
    dr = data_resources[dataset_name]
    if 'save_names' in dr:
        zip_urls = (dr['files'], dr['save_names'])
    else:
        zip_urls = (dr['files'], [])
    dataset_dir = os.path.join(data_path, dataset_name)
    for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]):
        for f, s in izip_longest(file_list, save_list, fillvalue=None):
            # A save_name, when present, replaces the downloaded file name.
            name = f if s is None else s
            if not os.path.exists(os.path.join(dataset_dir, name)):
                return False
    return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
    """Download a file from a url and save it to disk, printing progress.

    :param url: url of the file to fetch.
    :param store_directory: subdirectory of data_path to store the file in.
    :param save_name: optional alternative file name to save under.
    :param messages: NOTE(review): never read in this body -- kept for
        interface compatibility; confirm before removing.
    :param suffix: optional string appended to the url (e.g. query string).
    :raises ValueError: on HTTP 4xx/5xx responses.
    """
    i = url.rfind('/')
    file = url[i+1:]
    print file
    dir_name = os.path.join(data_path, store_directory)
    if save_name is None: save_name = os.path.join(dir_name, file)
    else: save_name = os.path.join(dir_name, save_name)
    if suffix is None: suffix=''
    print "Downloading ", url, "->", save_name
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    try:
        response = urllib2.urlopen(url+suffix)
    except urllib2.URLError, e:
        # HTTPError instances double as responses (they carry a .code);
        # anything without a code is a genuine network failure.
        if not hasattr(e, "code"):
            raise
        response = e
    if response.code > 399 and response.code<500:
        raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
    elif response.code > 499:
        raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
    with open(save_name, 'wb') as f:
        meta = response.info()
        content_length_str = meta.getheaders("Content-Length")
        if content_length_str:
            file_size = int(content_length_str[0])
        else:
            # Server did not report a Content-Length.
            file_size = None
        status = ""
        file_size_dl = 0
        block_sz = 8192
        line_length=30
        while True:
            buff = response.read(block_sz)
            if not buff:
                break
            file_size_dl += len(buff)
            f.write(buff)
            # Blank out the previous status line before rewriting it.
            sys.stdout.write(" "*(len(status)) + "\r")
            if file_size:
                # Progress bar scaled to the known total size.
                status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
                                                                         full=file_size/(1048576.), ll=line_length,
                                                                         perc="="*int(line_length*float(file_size_dl)/file_size))
            else:
                # Unknown total size: advance one dot per ~10MB downloaded.
                status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
                                                               ll=line_length,
                                                               perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
            sys.stdout.write(status)
            sys.stdout.flush()
        sys.stdout.write(" "*(len(status)) + "\r")
        print status
    # if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
    #with open(save_name, 'wb') as f:
    #    f.write(response.read())
    #urllib.urlretrieve(url+suffix, save_name, reporthook)
def authorize_download(dataset_name=None):
    """Present a data set's details, citation and license, and obtain the
    user's consent to download it.

    Returns True immediately (after printing the license) when
    overide_manual_authorize is set; otherwise defers to prompt_user.
    """
    print('Acquiring resource: ' + dataset_name)
    # TODO, check resource is in dictionary!
    print('')
    dr = data_resources[dataset_name]
    print('Details of data: ')
    print(dr['details'])
    print('')
    if dr['citation']:
        print('Please cite:')
        print(dr['citation'])
        print('')
    if dr['size']:
        print('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.')
        print('')
    print('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.')
    print('')
    license_text = dr['license']
    if overide_manual_authorize:
        # Consent is assumed; just record the license that was accepted.
        if license_text:
            print('You have agreed to the following license:')
            print(license_text)
            print('')
        return True
    if license_text:
        print('You must also agree to the following license:')
        print(license_text)
        print('')
    return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
    """Check with the user that they are happy with the terms and
    conditions for the data set, then download every declared file.

    :raises Exception: when the user refuses the license.
    """
    import itertools
    dr = data_resources[dataset_name]
    if not authorize_download(dataset_name):
        raise Exception("Permission to download data set denied.")
    # Optional per-file save names and url suffices default to empty lists.
    zip_urls = [dr['urls'], dr['files']]
    for optional_key in ('save_names', 'suffices'):
        zip_urls.append(dr[optional_key] if optional_key in dr else [])
    for url, files, save_names, suffices in itertools.izip_longest(*zip_urls, fillvalue=[]):
        for f, save_name, suffix in itertools.izip_longest(files, save_names, suffices, fillvalue=None):
            download_url(os.path.join(url, f), dataset_name, save_name, suffix=suffix)
    return True
def data_details_return(data, data_set):
    """Update the data dictionary in place with the metadata (urls, files,
    license, citation, ...) recorded for data_set in data_resources, and
    return it."""
    details = data_resources[data_set]
    data.update(details)
    return data
def cmu_urls_files(subj_motions, messages = True):
    '''
    Find which resources are missing on the local disk for the requested CMU motion capture motions.

    :param subj_motions: pair (subject numbers, per-subject motion number
        lists).
    :param messages: NOTE(review): never read in this body -- confirm
        before removing.
    :returns: dict with 'urls' and 'files' listing only what still needs
        downloading (one url and one file list per subject with missing
        files).
    '''
    dr = data_resources['cmu_mocap_full']
    cmu_url = dr['urls'][0]
    subjects_num = subj_motions[0]
    motions_num = subj_motions[1]
    resource = {'urls' : [], 'files' : []}
    # Convert numbers to strings
    subjects = []
    motions = [list() for _ in range(len(subjects_num))]
    for i in range(len(subjects_num)):
        # CMU file names zero-pad single-digit subject/motion numbers.
        curSubj = str(int(subjects_num[i]))
        if int(subjects_num[i]) < 10:
            curSubj = '0' + curSubj
        subjects.append(curSubj)
        for j in range(len(motions_num[i])):
            curMot = str(int(motions_num[i][j]))
            if int(motions_num[i][j]) < 10:
                curMot = '0' + curMot
            motions[i].append(curMot)
    # NOTE(review): all_skels and all_motions are never used below.
    all_skels = []
    assert len(subjects) == len(motions)
    all_motions = []
    for i in range(len(subjects)):
        skel_dir = os.path.join(data_path, 'cmu_mocap')
        cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
        url_required = False
        file_download = []
        if not os.path.exists(cur_skel_file):
            # Current skel file doesn't exist.
            if not os.path.isdir(skel_dir):
                os.makedirs(skel_dir)
            # Add skel file to list.
            url_required = True
            file_download.append(subjects[i] + '.asf')
        for j in range(len(motions[i])):
            file_name = subjects[i] + '_' + motions[i][j] + '.amc'
            cur_motion_file = os.path.join(skel_dir, file_name)
            if not os.path.exists(cur_motion_file):
                url_required = True
                file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
        if url_required:
            # One url per subject; the paired file list holds its missing files.
            resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
            resource['files'].append(file_download)
    return resource
# gpxpy is an optional dependency; the GPS data loader is only defined
# when it imports successfully.
try:
    import gpxpy
    import gpxpy.gpx
    gpxpy_available = True
except ImportError:
    gpxpy_available = False

if gpxpy_available:
    def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
        """Load five GPS traces of the same Mount Epomeo route, one per
        device, keeping every sample_every-th point.

        :returns: dict whose 'X' entry is a list of arrays with columns
            (seconds since 2013-08-21, latitude, longitude, elevation).
        """
        if not data_available(data_set):
            download_data(data_set)
        files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
        X = []
        for file in files:
            gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
            gpx = gpxpy.parse(gpx_file)
            # NOTE(review): 'segment' is assigned but never used; the
            # comprehension below iterates all tracks/segments itself.
            segment = gpx.tracks[0].segments[0]
            points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
            data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
            X.append(np.asarray(data)[::sample_every, :])
            gpx_file.close()
        return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
    #del gpxpy_available
# Some general utilities.
def sample_class(f):
    """Sample class labels in {-1, 1} given latent function values.

    P(label == 1) is the logistic sigmoid of f; draws use numpy's global RNG.
    """
    prob_one = 1. / (1. + np.exp(-f))
    draws = np.random.binomial(1, prob_one)
    # Map {0, 1} Bernoulli draws onto {-1, 1} labels.
    return np.where(draws, 1, -1)
def boston_housing(data_set='boston_housing'):
    """Load the Boston housing data: 13 input features per row, with the
    median house value as the single output column."""
    if not data_available(data_set):
        download_data(data_set)
    all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
    X = all_data[:, :13]
    Y = all_data[:, 13:14]
    return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
    """Load Brendan Frey's face images, one flattened image per row."""
    if not data_available(data_set):
        download_data(data_set)
    mat_path = os.path.join(data_path, data_set, 'frey_rawface.mat')
    mat_data = scipy.io.loadmat(mat_path)
    # The .mat file stores images as columns; transpose to one per row.
    Y = mat_data['ff'].T
    return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
    """Load the Della Gatta TRP63 time-course gene expression data.

    :param gene_number: when given, return only that gene's column,
        reshaped to a column vector; otherwise return all genes.
    """
    if not data_available(data_set):
        download_data(data_set)
    mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
    X = np.double(mat_data['timepoints'])
    exprs = mat_data['exprs_tp53_RMA']
    Y = exprs if gene_number is None else exprs[:, gene_number]
    # A single gene comes back 1-d; keep the (N, 1) column convention.
    if Y.ndim == 1:
        Y = Y[:, None]
    return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
    """Football data from English games since 1993. This downloads data from football-data.co.uk for the given season.

    :param season: four-digit string giving start and end year, e.g. '1314'.
    :returns: dict with 'X' (league id, match date, home team id, away team
        id) and 'Y' (remaining loaded columns, the goal counts).
    """
    def league2num(string):
        # Map a league code to a fixed integer identifier.
        league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
        return league_dict[string]
    def football2num(string):
        # Map a team name to an integer id, registering unseen teams in the
        # global football_dict.
        if football_dict.has_key(string):
            return football_dict[string]
        else:
            football_dict[string] = len(football_dict)+1
            # Bug fix: return the id that was just stored.  The previous
            # code returned len(football_dict)+1 *after* the insertion,
            # which is one larger than the stored id, so a team's first
            # occurrence was numbered differently from every later one.
            return football_dict[string]
    # Register a season-specific resource derived from the generic entry.
    data_set_season = data_set + '_' + season
    data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
    data_resources[data_set_season]['urls'][0]+=season + '/'
    start_year = int(season[0:2])
    end_year = int(season[2:4])
    files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
    # The Conference file only exists for seasons 05/06 through 92/93.
    if start_year>4 and start_year < 93:
        files += ['EC.csv']
    data_resources[data_set_season]['files'] = [files]
    if not data_available(data_set_season):
        download_data(data_set_season)
    import pylab as pb
    for file in reversed(files):
        filename = os.path.join(data_path, data_set_season, file)
        # rewrite files removing blank rows.
        writename = os.path.join(data_path, data_set_season, 'temp.csv')
        input = open(filename, 'rb')
        output = open(writename, 'wb')
        writer = csv.writer(output)
        for row in csv.reader(input):
            if any(field.strip() for field in row):
                writer.writerow(row)
        input.close()
        output.close()
        # NOTE(review): table/X/Y are overwritten on every pass, so only the
        # last file processed (E0.csv) contributes to the returned arrays --
        # confirm whether concatenation across leagues was intended.
        table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',')
        X = table[:, :4]
        Y = table[:, 4:]
    return data_details_return({'X': X, 'Y': Y}, data_set)
def sod1_mouse(data_set='sod1_mouse'):
    """Load the SOD1 mouse expression data as a pandas DataFrame."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'sod1_C57_129_exprs.csv')
    Y = read_csv(filename, header=0, index_col=0)
    # X is a placeholder: the original code never constructed a design
    # matrix for the 4 repeats x 4 times x 4 conditions layout.
    X = 1
    return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
    """Load the full Spellman et al. yeast cell-cycle expression data."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'combined.txt')
    Y = read_csv(filename, header=0, index_col=0, sep='\t')
    return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
    """Load the CDC-15 synchronized subset of the Spellman yeast data.

    Returns the expression matrix restricted to the cdc15 time points,
    transposed so rows are time points, plus the time vector itself.
    """
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'combined.txt')
    Y = read_csv(filename, header=0, index_col=0, sep='\t')
    # Sampling times (minutes) of the cdc15 experiment columns.
    t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
    Y = Y[['cdc15_' + str(time) for time in t]].T
    t = t[:, None]
    return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
    """Load the Lee et al. yeast ChIP-chip transcription factor binding
    data, separating gene annotations from the binding matrix."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'binding_by_gene.tsv')
    S = read_csv(filename, header=1, index_col=0, sep='\t')
    # Real transcription factor columns have proper names; the leading
    # annotation columns come in as 'Unnamed: N'.
    transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
    annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
    S = S[transcription_factors]
    return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
    """Load the Tomancak et al. fruitfly developmental expression data.

    Inputs X pair each observation with (time index, repeat index).
    """
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'tomancak_exprs.csv')
    Y = read_csv(filename, header=0, index_col=0).T
    num_repeats = 3
    num_time = 12
    # Build the (time, repeat) input grid, flattened row-wise.
    time_grid, repeat_grid = np.meshgrid(np.linspace(0, num_time-1, num_time),
                                         np.linspace(0, num_repeats-1, num_repeats))
    X = np.vstack((time_grid.flatten(), repeat_grid.flatten())).T
    return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
    """Load the Becker et al. drosophila protein data as a DataFrame."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'becker_et_al.csv')
    Y = read_csv(filename, header=0)
    return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
    """Load the knirps measurements from the Becker et al. data, stacking
    the two expression readouts into a single multi-output data set."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'becker_et_al.csv')
    # in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
    df = read_csv(filename, header=0)
    t = df['t'][:, None]
    x = df['x'][:, None]
    g = df['expression1'][:, None]
    p = df['expression2'][:, None]
    leng = x.shape[0]
    # Third input column is an output indicator: 0 for the first block of
    # observations, 1 for the second.
    indicator = np.zeros(leng * 2)[:, None]
    indicator[leng:] = 1
    X = np.hstack([np.vstack([t, t]), np.vstack([x, x]), indicator])
    Y = np.vstack([g, p])
    return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
    """Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
    # NOTE(review): sort() mutates the mutable default list in place;
    # harmless here because sorting is idempotent, but worth knowing.
    query_terms.sort()
    import pandas
    # Create directory name for data
    dir_path = os.path.join(data_path,'google_trends')
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    # Cache directory is derived from the sorted query terms.
    dir_name = '-'.join(query_terms)
    dir_name = dir_name.replace(' ', '_')
    dir_path = os.path.join(dir_path,dir_name)
    file = 'data.csv'
    file_name = os.path.join(dir_path,file)
    if not os.path.exists(file_name) or refresh_data:
        print "Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks."
        # quote the query terms.
        quoted_terms = []
        for term in query_terms:
            quoted_terms.append(urllib2.quote(term))
        print "Query terms: ", ', '.join(query_terms)
        print "Fetching query:"
        query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
        data = urllib2.urlopen(query).read()
        print "Done."
        # In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
        header = """// Data table response\ngoogle.visualization.Query.setResponse("""
        data = data[len(header):-2]
        # Javascript Date months are 0-based, hence the 1+ below.
        data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
        timeseries = json.loads(data)
        columns = [k['label'] for k in timeseries['table']['cols']]
        rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows'])
        df = pandas.DataFrame(rows, columns=columns)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        df.to_csv(file_name)
    else:
        print "Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function."
        print "Query terms: ", ', '.join(query_terms)
        df = pandas.read_csv(file_name, parse_dates=[0])
    columns = df.columns
    terms = len(query_terms)
    import datetime
    # Inputs are (row index, term index) pairs; outputs are trend values.
    X = np.asarray([(row, i) for i in range(terms) for row in df.index])
    Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
    output_info = columns[1:]
    return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
# The data sets
def oil(data_set='three_phase_oil_flow'):
    """The three phase oil data from Bishop and James (1993).

    :returns: dict with train/test/validation inputs (12 columns each) and
        the corresponding one-of-three labels mapped into {-1, 1}.
    """
    if not data_available(data_set):
        download_data(data_set)
    def _read_table(file_name, width):
        # Load a tab-separated matrix with the given column count,
        # closing the file handle (the original left handles open).
        fid = open(file_name)
        try:
            return np.fromfile(fid, sep='\t').reshape((-1, width))
        finally:
            fid.close()
    dir_path = os.path.join(data_path, data_set)
    X = _read_table(os.path.join(dir_path, 'DataTrn.txt'), 12)
    Xtest = _read_table(os.path.join(dir_path, 'DataTst.txt'), 12)
    Xvalid = _read_table(os.path.join(dir_path, 'DataVdn.txt'), 12)
    # Labels are stored one-hot in {0, 1}; rescale to {-1, 1}.
    Y = _read_table(os.path.join(dir_path, 'DataTrnLbls.txt'), 3) * 2. - 1.
    Ytest = _read_table(os.path.join(dir_path, 'DataTstLbls.txt'), 3) * 2. - 1.
    Yvalid = _read_table(os.path.join(dir_path, 'DataVdnLbls.txt'), 3) * 2. - 1.
    # Bug fix: the returned dict previously listed 'Xtest' twice.
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
    """A seeded random subsample of 100 points from the three phase oil
    training data."""
    np.random.seed(seed=seed)
    data = oil()
    # Pick 100 of the 1000 training points without replacement.
    subset = np.random.permutation(1000)[:100]
    X = data['X'][subset, :]
    Y = data['Y'][subset, :]
    return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
    """Load the pumadyn-32nm robot arm data and split it into a 7168-point
    training set with the remainder as test data.

    :param seed: seed for the random train/test permutation.
    """
    if not data_available(data_set):
        download_data(data_set)
        path = os.path.join(data_path, data_set)
        tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
        print('Extracting file.')
        tar.extractall(path=path)
        tar.close()
    # Data is variance 1, no need to normalize.
    data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
    # Bug fix: seed was reported in the returned dictionary but never
    # applied, so the split was not reproducible.
    np.random.seed(seed=seed)
    indices = np.random.permutation(data.shape[0])
    indicesTrain = indices[0:7168]
    # Bug fix: was indices[7168:-1], which silently dropped the final
    # test sample.
    indicesTest = indices[7168:]
    indicesTrain.sort(axis=0)
    indicesTest.sort(axis=0)
    X = data[indicesTrain, 0:-2]
    Y = data[indicesTrain, -1][:, None]
    Xtest = data[indicesTest, 0:-2]
    Ytest = data[indicesTest, -1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
    # WiFi access point strengths on a tour around UW Paul Allen building.
    """WiFi access point strengths on a tour around the UW Paul Allen
    building: inputs are (x, y) positions, outputs one normalized signal
    strength column per access point; the first 215 time points are
    training data, the remainder test.
    """
    if not data_available(data_set):
        download_data(data_set)
    file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
    # Columns: time, MAC address, x, y, signal strength.
    all_time = np.genfromtxt(file_name, usecols=(0))
    macaddress = np.genfromtxt(file_name, usecols=(1), dtype='string')
    x = np.genfromtxt(file_name, usecols=(2))
    y = np.genfromtxt(file_name, usecols=(3))
    strength = np.genfromtxt(file_name, usecols=(4))
    addresses = np.unique(macaddress)
    times = np.unique(all_time)
    addresses.sort()
    times.sort()
    allY = np.zeros((len(times), len(addresses)))
    allX = np.zeros((len(times), 2))
    # Missing readings default to -92 (below the weakest observed signal).
    allY[:]=-92.
    # NOTE(review): 'strengths' is never used below.
    strengths={}
    for address, j in zip(addresses, range(len(addresses))):
        # Rows belonging to this access point.
        ind = np.nonzero(address==macaddress)
        temp_strengths=strength[ind]
        temp_x=x[ind]
        temp_y=y[ind]
        temp_times = all_time[ind]
        for time in temp_times:
            vals = time==temp_times
            if any(vals):
                ind2 = np.nonzero(vals)
                i = np.nonzero(time==times)
                # Scatter this reading into the (time, access point) grid.
                allY[i, j] = temp_strengths[ind2]
                allX[i, 0] = temp_x[ind2]
                allX[i, 1] = temp_y[ind2]
    # Rescale strengths from dB into a small centered range.
    allY = (allY + 85.)/15.
    X = allX[0:215, :]
    Y = allY[0:215, :]
    Xtest = allX[215:, :]
    Ytest = allY[215:, :]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
    """Ankur Agarwal and Bill Triggs' silhouette data: silhouette features
    as inputs (normalized by the training statistics), pose vectors as
    outputs."""
    if not data_available(data_set):
        download_data(data_set)
    mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
    # Normalize both input sets with the training mean and scale.
    inMean = np.mean(mat_data['Y'])
    inScales = np.sqrt(np.var(mat_data['Y']))
    X = (mat_data['Y'] - inMean) / inScales
    Xtest = (mat_data['Y_test'] - inMean) / inScales
    Y = mat_data['Z']
    Ytest = mat_data['Z_test']
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
    """Handwritten digit images from the de Campos characters data.

    :param which_digits: digit classes to include.
    :returns: flattened images Y, integer labels lbls, string labels str_lbls.
    """
    if not data_available(data_set):
        download_data(data_set)
    digits = np.load(os.path.join(data_path, data_set, 'digits.npy'))
    digits = digits[which_digits, :, :, :]
    num_classes, num_samples, height, width = digits.shape
    # One flattened image per row.
    Y = digits.reshape((num_classes * num_samples, height * width))
    lbls = np.array([[l] * num_samples for l in which_digits]).reshape(Y.shape[0], 1)
    str_lbls = np.array([[str(l)] * num_samples for l in which_digits])
    return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
    """Ripley's synthetic two-class data: 2-d inputs, binary labels."""
    if not data_available(data_set):
        download_data(data_set)
    train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
    test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
    X, y = train[:, 0:2], train[:, 2:3]
    Xtest, ytest = test[:, 0:2], test[:, 2:3]
    return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print 'Using cached version of the data set, to use latest version set refresh_data to True'
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
print 'Using cached version of the data set, to use latest version set refresh_data to True'
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
    """Monthly airline passenger counts from Box & Jenkins (1976), split
    into training and test portions.

    :param num_train: number of leading observations used for training.
    """
    if not data_available(data_set):
        download_data(data_set)
    data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
    # Column 0 is the time index, column 1 the passenger count.
    Y = data[:num_train, 1:2]
    X = data[:num_train, 0:1]
    Xtest = data[num_train:, 0:1]
    Ytest = data[num_train:, 1:2]
    # Bug fix: info string previously misspelled "Monthly" as "Montly".
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Monthly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
    """Ohio State University 'run1' motion capture sequence.

    :param sample_every: keep every sample_every-th frame.
    """
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        download_data(data_set)
    # Bug fix: the archive handle was never closed, and the local name
    # 'zip' shadowed the builtin.
    archive = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
    try:
        for name in archive.namelist():
            archive.extract(name, path)
    finally:
        archive.close()
    Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
    # Subsample the frames.
    Y = Y[0:-1:sample_every, :]
    return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
    """Return a random subsample of the pre-generated swiss roll data,
    sorted along the roll parameter t.

    :param sigma: kept for interface compatibility; not read here.
    """
    with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle')) as f:
        data = pickle.load(f)
    total = data['Y'].shape[0]
    # Subsample without replacement via a random permutation.
    chosen = np.random.permutation(total)[:num_samples]
    Y = data['Y'][chosen, :]
    t = data['t'][chosen]
    colors = data['colors'][chosen, :]
    # Order points by the roll parameter.
    order = np.argsort(t)
    return {'Y': Y[order, :], 't': t[order], 'colors': colors[order, :]}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Positon (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
except ImportError as i:
raise i, "Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset"
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print "Preprocessing required for further usage."
return
status = "Preprocessing data, please be patient..."
print status
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:]))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
pickle.dump(f, snpsdf, protocoll=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print ''
else:
print "loading snps..."
snpsdf = read_pickle(preprocessed_data_paths[0])
print "loading metainfo..."
metadf = read_pickle(preprocessed_data_paths[1])
print "loading nan entries..."
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing wheather or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
def singlecell(data_set='singlecell'):
    """qPCR single-cell data (Guo et al. 2010): 48 gene expressions across
    mouse cells in the 1-64 cell states."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'singlecell.csv')
    Y = read_csv(filename, header=0, index_col=0)
    # Columns are genes, the index carries the cell-state labels.
    return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
                                'genes': Y.columns, 'labels': Y.index,
                                }, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
    """Single-cell RNA-seq data of Islam et al. (GSE29087): 92 cells
    (mouse ES cells, embryonic fibroblasts and negative controls) profiled
    by single-cell tagged reverse transcription (STRT).

    Returns the expression matrix Y (cells x genes), ES/MEF labels derived
    from the sample annotations, and the raw data/info frames.
    """
    if not data_available(dataset):
        download_data(dataset)
    from pandas import read_csv, DataFrame, concat
    dir_path = os.path.join(data_path, dataset)
    filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
    # The expression table has two header rows (read separately below) on
    # top of the count matrix.
    data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
    header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
    header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
    data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
    # Keep only the per-cell count columns, transposed to cells x genes.
    Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
    # read the info .soft
    filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
    info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
    # split at ' = '
    info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
    # only take samples:
    info = info[info[0].str.contains("!Sample")]
    info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
    groups = info.groupby(0).groups
    # remove 'GGG' from barcodes
    barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
    # Re-key the annotation series by barcode so they align with Y's index.
    title = info[1][groups['title']]
    title.index = barcode
    title.name = 'title'
    geo_accession = info[1][groups['geo_accession']]
    geo_accession.index = barcode
    geo_accession.name = 'geo_accession'
    case_id = info[1][groups['source_name_ch1']]
    case_id.index = barcode
    case_id.name = 'source_name_ch1'
    info = concat([title, geo_accession, case_id], axis=1)
    # Map the verbose source names onto short ES/MEF labels.
    labels = info.join(Y).source_name_ch1[:-4]
    labels[labels=='Embryonic stem cell'] = "ES"
    labels[labels=='Embryonic fibroblast'] = "MEF"
    return data_details_return({'Y': Y,
                                'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
                                'genes': Y.columns,
                                'labels': labels,
                                'datadf': data,
                                'infodf': info}, dataset)
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
    """Load the Deng et al. single-cell RNA-seq data set (GEO series GSE45719).

    Parses the series matrix for per-sample metadata, extracts the per-cell
    RPKM files from the raw tar archive, and derives developmental-stage
    labels.  Python 2 print statements are used for progress reporting.
    """
    if not data_available(dataset):
        download_data(dataset)
    from pandas import read_csv, isnull
    dir_path = os.path.join(data_path, dataset)
    # read the info .soft
    filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
    # First 29 rows hold series-level fields such as summary and design.
    info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
    summary = info.loc['!Series_summary'][1]
    design = info.loc['!Series_overall_design']
    # only take samples:
    sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
    # Strip the '!Sample_' prefix from every metadata column name.
    sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
    sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
    sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
    # Drop column 4 (np.r_ keeps 0:4 and 5:end).
    sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
    c = sample_info.columns.to_series()
    c[1:4] = ['strain', 'cross', 'developmental_stage']
    sample_info.columns = c
    # get the labels right:
    rep = re.compile('\(.*\)')
    def filter_dev_stage(row):
        # Normalize a raw 'developmental stage' annotation to a clean label.
        if isnull(row):
            row = "2-cell stage embryo"
        if row.startswith("developmental stage: "):
            row = row[len("developmental stage: "):]
        if row == 'adult':
            row += " liver"
        row = row.replace(' stage ', ' ')
        # Remove any parenthesised qualifier.
        row = rep.sub(' ', row)
        row = row.strip(' ')
        return row
    labels = sample_info.developmental_stage.apply(filter_dev_stage)
    # Extract the tar file
    filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
    with tarfile.open(filename, 'r') as files:
        print "Extracting Archive {}...".format(files.name)
        data = None
        gene_info = None
        message = ''
        members = files.getmembers()
        overall = len(members)
        for i, file_info in enumerate(members):
            # Each member is a gzipped per-cell expression table.
            f = files.extractfile(file_info)
            inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0)
            # Overwrite the previous progress line on the terminal.
            print ' '*(len(message)+1) + '\r',
            message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz")
            print message,
            if data is None:
                # First member initializes the RPKM and gene-info frames;
                # column name is the member name minus its 18-char suffix.
                data = inner.RPKM.to_frame()
                data.columns = [file_info.name[:-18]]
                gene_info = inner.Refseq_IDs.to_frame()
                gene_info.columns = [file_info.name[:-18]]
            else:
                data[file_info.name[:-18]] = inner.RPKM
                gene_info[file_info.name[:-18]] = inner.Refseq_IDs
        # Strip GSM number off data index
        rep = re.compile('GSM\d+_')
        data.columns = data.columns.to_series().apply(lambda row: row[rep.match(row).end():])
        data = data.T
        # make sure the same index gets used
        sample_info.index = data.index
        # get the labels from the description
        #rep = re.compile('fibroblast|\d+-cell|embryo|liver|early blastocyst|mid blastocyst|late blastocyst|blastomere|zygote', re.IGNORECASE)
        # Clear the progress line before the final message.
        sys.stdout.write(' '*len(message) + '\r')
        sys.stdout.flush()
        print
        print "Read Archive {}".format(files.name)
    return data_details_return({'Y': data,
                                'series_info': info,
                                'sample_info': sample_info,
                                'gene_info': gene_info,
                                'summary': summary,
                                'design': design,
                                'genes': data.columns,
                                'labels': labels,
                                }, dataset)
def swiss_roll_1000():
    """Convenience wrapper: the first 1000 points of the swiss roll data."""
    return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set='swiss_roll'):
    """Return the first ``num_samples`` points of the swiss roll data set."""
    if not data_available(data_set):
        download_data(data_set)
    mat_path = os.path.join(data_path, data_set, 'swiss_roll_data.mat')
    mat_data = scipy.io.loadmat(mat_path)
    points = mat_data['X_data'][:, :num_samples].T
    info = "The first " + str(num_samples) + " points from the swiss roll data of Tennenbaum, de Silva and Langford (2001)."
    return data_details_return({'Y': points, 'X': mat_data['X_data'], 'info': info}, data_set)
def isomap_faces(num_samples=698, data_set='isomap_face_data'):
    """Return the first ``num_samples`` isomap face images plus pose/light data."""
    if not data_available(data_set):
        download_data(data_set)
    mat_path = os.path.join(data_path, data_set, 'face_data.mat')
    mat_data = scipy.io.loadmat(mat_path)
    faces = mat_data['images'][:, :num_samples].T
    info = "The first " + str(num_samples) + " points from the face data of Tennenbaum, de Silva and Langford (2001)."
    return data_details_return({'Y': faces,
                                'poses': mat_data['poses'],
                                'lights': mat_data['lights'],
                                'info': info}, data_set)
def simulation_BGPLVM():
    """Load the MATLAB-generated simulation used to cross-check BGPLVM."""
    mat_data = scipy.io.loadmat(os.path.join(data_path, 'BGPLVMSimulation.mat'))
    return {'Y': np.array(mat_data['Y'], dtype=float),
            'S': np.array(mat_data['initS'], dtype=float),
            'mu': np.array(mat_data['initMu'], dtype=float),
            'info': "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB"}
def toy_rbf_1d(seed=default_seed, num_samples=500):
    """
    Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
    :param seed: seed to use for random sampling.
    :type seed: int
    :param num_samples: number of samples to sample in the function (default 500).
    :type num_samples: int
    """
    np.random.seed(seed=seed)
    num_in = 1
    # Sorted uniform inputs on [-1, 1].
    X = np.random.uniform(low= -1.0, high=1.0, size=(num_samples, num_in))
    X.sort(axis=0)
    # Covariance: unit-variance RBF plus small white noise (variance 1e-2).
    rbf = GPy.kern.RBF(num_in, variance=1., lengthscale=np.array((0.25,)))
    white = GPy.kern.White(num_in, variance=1e-2)
    kernel = rbf + white
    K = kernel.K(X)
    # Single draw from the zero-mean GP prior with covariance K.
    y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1))
    return {'X':X, 'Y':y, 'info': "Sampled " + str(num_samples) + " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1."}
def toy_rbf_1d_50(seed=default_seed):
    """Subsample 50 random points from the toy_rbf_1d data set."""
    np.random.seed(seed=seed)
    data = toy_rbf_1d()
    # NOTE: toy_rbf_1d re-seeds internally, so this permutation is drawn
    # from the generator state it leaves behind.
    chosen = np.random.permutation(data['X'].shape[0])[0:50]
    chosen.sort(axis=0)
    return {'X': data['X'][chosen, :],
            'Y': data['Y'][chosen, :],
            'info': "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.",
            'seed': seed}
def toy_linear_1d_classification(seed=default_seed):
    """Two overlapping 1-D Gaussian clusters with sampled class labels."""
    np.random.seed(seed=seed)
    left = np.random.normal(-3, 5, 20)
    right = np.random.normal(3, 5, 20)
    X = (np.r_[left, right])[:, None]
    F = 2. * X
    return {'X': X, 'Y': sample_class(F), 'F': F, 'seed': seed}
def olivetti_glasses(data_set='olivetti_glasses', num_training=200, seed=default_seed):
    """ORL faces with binary glasses labels, randomly split into train/test."""
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        download_data(data_set)
    # Labels are stored as 'y'/'n' characters; map to 1/0 column vector.
    raw_labels = np.load(os.path.join(path, 'has_glasses.np'))
    y = np.where(raw_labels=='y',1,0).reshape(-1,1)
    faces = scipy.io.loadmat(os.path.join(path, 'olivettifaces.mat'))['faces'].T
    np.random.seed(seed=seed)
    perm = np.random.permutation(faces.shape[0])
    train_idx = perm[:num_training]
    test_idx = perm[num_training:]
    return data_details_return({'X': faces[train_idx, :],
                                'Y': y[train_idx, :],
                                'Xtest': faces[test_idx, :],
                                'Ytest': y[test_idx],
                                'seed': seed,
                                'info': "ORL Faces with labels identifiying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by James Hensman."}, 'olivetti_faces')
def olivetti_faces(data_set='olivetti_faces'):
    """Load the ORL/AT&T face images as flattened vectors with subject labels.

    Fixes: the ZipFile was bound to a name shadowing the builtin ``zip`` and
    was never closed (use a context manager instead); the netpbmfile import
    was needlessly repeated inside the inner loop.
    """
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        download_data(data_set)
    # Extract all images; the context manager guarantees the archive is closed.
    with zipfile.ZipFile(os.path.join(path, 'att_faces.zip'), 'r') as archive:
        for name in archive.namelist():
            archive.extract(name, path)
    from GPy.util import netpbmfile  # hoisted out of the 40x10 image loop
    Y = []
    lbls = []
    # 40 subjects (s1..s40), 10 images each (1.pgm..10.pgm).
    for subject in range(40):
        for image in range(10):
            image_path = os.path.join(path, 'orl_faces', 's'+str(subject+1), str(image+1) + '.pgm')
            Y.append(netpbmfile.imread(image_path).flatten())
            lbls.append(subject)
    Y = np.asarray(Y)
    lbls = np.asarray(lbls)[:, None]
    return data_details_return({'Y': Y, 'lbls' : lbls, 'info': "ORL Faces processed to 64x64 images."}, data_set)
def xw_pen(data_set='xw_pen'):
    """Tilt time series from a personalized digital assistant pen."""
    if not data_available(data_set):
        download_data(data_set)
    tilt = np.loadtxt(os.path.join(data_path, data_set, 'xw_pen_15.csv'), delimiter=',')
    time_index = np.arange(485)[:, None]
    return data_details_return({'Y': tilt,
                                'X': time_index,
                                'info': "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275."}, data_set)
def download_rogers_girolami_data(data_set='rogers_girolami_data'):
    """Download and extract the Rogers & Girolami book data if not yet present.

    Fixes: availability was checked against the hard-coded name
    'rogers_girolami_data' instead of the ``data_set`` argument, so a caller
    passing a different name would test the wrong data set; the tar file is
    now opened with a context manager so it is closed even on error.
    """
    if not data_available(data_set):
        download_data(data_set)
        path = os.path.join(data_path, data_set)
        tar_file = os.path.join(path, 'firstcoursemldata.tar.gz')
        print('Extracting file.')
        with tarfile.open(tar_file) as tar:
            tar.extractall(path=path)
def olympic_100m_men(data_set='rogers_girolami_data'):
    """Olympic men's 100 m winning times (year, time) from the R&G book data."""
    download_rogers_girolami_data()
    mat_file = os.path.join(data_path, data_set, 'data', 'olympics.mat')
    olympic_data = scipy.io.loadmat(mat_file)['male100']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_100m_women(data_set='rogers_girolami_data'):
    """Olympic women's 100 m winning times (year, time) from the R&G book data."""
    download_rogers_girolami_data()
    mat_file = os.path.join(data_path, data_set, 'data', 'olympics.mat')
    olympic_data = scipy.io.loadmat(mat_file)['female100']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_women(data_set='rogers_girolami_data'):
    """Olympic women's 200 m winning times (year, time) from the R&G book data."""
    download_rogers_girolami_data()
    mat_file = os.path.join(data_path, data_set, 'data', 'olympics.mat')
    olympic_data = scipy.io.loadmat(mat_file)['female200']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_men(data_set='rogers_girolami_data'):
    """Olympic men's 200 m winning times (year, time) from the R&G book data.

    Fix: the info string wrongly read "Male 200 m winning times for women";
    this loads the 'male200' table, so it now describes the men's event.
    """
    download_rogers_girolami_data()
    olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male200']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for men from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_women(data_set='rogers_girolami_data'):
    """Olympic women's 400 m winning times (year, time) from the R&G book data."""
    download_rogers_girolami_data()
    mat_file = os.path.join(data_path, data_set, 'data', 'olympics.mat')
    olympic_data = scipy.io.loadmat(mat_file)['female400']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_men(data_set='rogers_girolami_data'):
    """Olympic men's 400 m winning times (year, time) from the R&G book data.

    Fix: the info string wrongly read "Male 400 m winning times for women";
    this loads the 'male400' table, so it now describes the men's event.
    """
    download_rogers_girolami_data()
    olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male400']
    X = olympic_data[:, 0][:, None]
    Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for men until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_marathon_men(data_set='olympic_marathon_men'):
    """Olympic marathon times for men, as (year, time) column vectors."""
    if not data_available(data_set):
        download_data(data_set)
    csv_path = os.path.join(data_path, data_set, 'olympicMarathonTimes.csv')
    olympics = np.genfromtxt(csv_path, delimiter=',')
    return data_details_return({'X': olympics[:, 0:1], 'Y': olympics[:, 1:2]}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction.

    Stacks the six sprint data sets into one design matrix whose second
    column is the output index (see 'output_info' in the returned dict).

    Fix: the original mutated each fetched data set's dict
    (``data['X'] = X`` etc.) even though that dict was immediately
    discarded; the dead assignments are removed.
    """
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    for i, dataset in enumerate([olympic_100m_men,
                                 olympic_100m_women,
                                 olympic_200m_men,
                                 olympic_200m_women,
                                 olympic_400m_men,
                                 olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        # Append (year, output index) rows and the matching times.
        X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
        Y = np.vstack((Y, time))
    return data_details_return({
        'X': X,
        'Y': Y,
        'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
            0:'100m Men',
            1:'100m Women',
            2:'200m Men',
            3:'200m Women',
            4:'400m Men',
            5:'400m Women'}
        }, data_set)
# def movielens_small(partNo=1,seed=default_seed):
# np.random.seed(seed=seed)
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.base')
# fid = open(fileName)
# uTrain = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# maxVals = np.amax(uTrain, axis=0)
# numUsers = maxVals[0]
# numFilms = maxVals[1]
# numRatings = uTrain.shape[0]
# Y = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTrain[:, 0]==i+1)
# Y[uTrain[ind, 1]-1, i] = uTrain[ind, 2]
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.test')
# fid = open(fileName)
# uTest = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# numTestRatings = uTest.shape[0]
# Ytest = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTest[:, 0]==i+1)
# Ytest[uTest[ind, 1]-1, i] = uTest[ind, 2]
# lbls = np.empty((1,1))
# lblstest = np.empty((1,1))
# return {'Y':Y, 'lbls':lbls, 'Ytest':Ytest, 'lblstest':lblstest}
def crescent_data(num_data=200, seed=default_seed):
    """
    Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
    :param num_data: number of data to be sampled (default is 200).
    :type num_data: int
    :param seed: random seed to be used for data generation.
    :type seed: int

    Fixes: two of the four scale matrices were plain nested lists instead of
    arrays (inconsistent with the others); the per-component sample counts
    come from round(), whose float result is now cast to int so it is a valid
    array dimension on Python 3 as well.
    """
    np.random.seed(seed=seed)
    sqrt2 = np.sqrt(2)
    # Rotation matrix
    R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
    # Scaling matrices: components 0/1 stretch along x, components 2/3 along y.
    scales = [np.array([[3, 0], [0, 1]]),
              np.array([[3, 0], [0, 1]]),
              np.array([[1, 0], [0, 3]]),
              np.array([[1, 0], [0, 3]])]
    means = [np.array([4, 4]),
             np.array([0, 4]),
             np.array([-4, -4]),
             np.array([0, -4])]
    Xparts = []
    num_data_part = []
    num_data_total = 0
    for i in range(0, 4):
        # Split num_data across the four components via cumulative rounding.
        num_data_part.append(int(round(((i + 1) * num_data) / 4.)) - num_data_total)
        part = np.random.normal(size=(num_data_part[i], 2))
        # Scale, rotate and shift the standard-normal draws.
        part = np.dot(np.dot(part, scales[i]), R) + means[i]
        Xparts.append(part)
        num_data_total += num_data_part[i]
    X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
    # Components 0/1 form class +1, components 2/3 form class -1.
    Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)),
                   -np.ones((num_data_part[2] + num_data_part[3], 1))))
    return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."}
def creep_data(data_set='creep_rupture'):
    """Brun and Yoshida's metal creep rupture data."""
    if not data_available(data_set):
        download_data(data_set)
    path = os.path.join(data_path, data_set)
    with tarfile.open(os.path.join(path, 'creeprupt.tar')) as archive:
        print('Extracting file.')
        archive.extractall(path=path)
    all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
    # Column 1 is the target; the inputs are column 0 plus columns 2..30.
    y = all_data[:, 1:2].copy()
    X = all_data[:, [0] + list(range(2, 31))].copy()
    return data_details_return({'X': X, 'y': y}, data_set)
def cifar10_patches(data_set='cifar-10'):
    """The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code

    Fix: patches were accumulated with np.concatenate inside the loop, which
    reallocates and copies the growing array every iteration (quadratic);
    slices are now collected in a list and concatenated once.
    """
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
    if not data_available(data_set):
        download_data(data_set)
    import tarfile
    # This code is from Boris Babenko's blog post.
    # http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
    with tarfile.open(filename, 'r:gz') as tfile:
        tfile.extractall(dir_path)
    with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
        data = pickle.load(f)
    images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
    images = np.rollaxis(images, 1, 4)
    # Seed with an empty float64 array so the concatenated result keeps the
    # same dtype (float64) the original code produced.
    patch_list = [np.zeros((0, 5, 5, 3))]
    for x in range(0, 32 - 5, 5):
        for y in range(0, 32 - 5, 5):
            patch_list.append(images[:, x:x + 5, y:y + 5, :])
    patches = np.concatenate(patch_list, axis=0)
    patches = patches.reshape((patches.shape[0], -1))
    return data_details_return({'Y': patches, "info" : "32x32 pixel patches extracted from the CIFAR-10 data by Boris Babenko to demonstrate k-means features."}, data_set)
def cmu_mocap_49_balance(data_set='cmu_mocap'):
    """Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
    balance = cmu_mocap('49', ['18', '19'], ['20'], sample_every=4, data_set=data_set)
    balance['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + balance['info']
    return balance
def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):
    """Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
    # Motions 18 and 29 are held out as the test set; 27 is not used at all.
    train_motions = ['%02d' % m
                     for m in list(range(1, 18)) + [19] + list(range(20, 27)) + [28] + list(range(30, 35))]
    test_motions = ['18', '29']
    data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)
    data['info'] = "Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of " + data['info']
    return data
def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4, data_set='cmu_mocap'):
    """Load a given subject's training and test motions from the CMU motion capture data."""
    # NOTE(review): test_motions=[] is a mutable default; it is only read
    # here, but callers should not mutate the returned default.
    # Load in subject skeleton.
    subject_dir = os.path.join(data_path, data_set)
    # Make sure the data is downloaded.
    all_motions = train_motions + test_motions
    resource = cmu_urls_files(([subject], [all_motions]))
    # Register a transient resource entry listing only the files needed here.
    data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
    data_resources[data_set]['files'] = resource['files']
    data_resources[data_set]['urls'] = resource['urls']
    if resource['urls']:
        download_data(data_set)
    skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf'))
    # Set up labels for each sequence (one-hot row per training motion).
    exlbls = np.eye(len(train_motions))
    # Load sequences
    tot_length = 0
    temp_Y = []
    temp_lbls = []
    for i in range(len(train_motions)):
        # Sub-sample each channel sequence by taking every sample_every-th frame.
        temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc'))
        temp_Y.append(temp_chan[::sample_every, :])
        temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
        tot_length += temp_Y[i].shape[0]
    # Stack all training sequences into one matrix with aligned labels.
    Y = np.zeros((tot_length, temp_Y[0].shape[1]))
    lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
    end_ind = 0
    for i in range(len(temp_Y)):
        start_ind = end_ind
        end_ind += temp_Y[i].shape[0]
        Y[start_ind:end_ind, :] = temp_Y[i]
        lbls[start_ind:end_ind, :] = temp_lbls[i]
    if len(test_motions) > 0:
        # Same procedure for the held-out test motions.
        temp_Ytest = []
        temp_lblstest = []
        testexlbls = np.eye(len(test_motions))
        tot_test_length = 0
        for i in range(len(test_motions)):
            temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc'))
            temp_Ytest.append(temp_chan[::sample_every, :])
            temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
            tot_test_length += temp_Ytest[i].shape[0]
        # Load test data
        Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
        lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
        end_ind = 0
        for i in range(len(temp_Ytest)):
            start_ind = end_ind
            end_ind += temp_Ytest[i].shape[0]
            Ytest[start_ind:end_ind, :] = temp_Ytest[i]
            lblstest[start_ind:end_ind, :] = temp_lblstest[i]
    else:
        Ytest = None
        lblstest = None
    # Build a human-readable summary of what was loaded.
    info = 'Subject: ' + subject + '. Training motions: '
    for motion in train_motions:
        info += motion + ', '
    # Strip the trailing ', '.
    info = info[:-2]
    if len(test_motions) > 0:
        info += '. Test motions: '
        for motion in test_motions:
            info += motion + ', '
        info = info[:-2] + '.'
    else:
        info += '.'
    if sample_every != 1:
        info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.'
    return data_details_return({'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel}, data_set)
| bsd-3-clause |
JungeAlexander/cocoscore | tests/tagger/test_co_occurrence_score.py | 1 | 54674 | import numpy
import pandas
from pandas.util.testing import assert_frame_equal
from pytest import approx
from pytest import raises
import cocoscore.tagger.co_occurrence_score as co_occurrence_score
import cocoscore.tools.data_tools as dt
from cocoscore.ml.distance_scores import polynomial_decay_distance
from cocoscore.ml.fasttext_helpers import fasttext_fit_predict_default
def fasttext_function(train, valid, epochs, dim, bucket):
    """Forward the given hyper-parameters to fasttext_fit_predict_default."""
    return fasttext_fit_predict_default(
        train,
        valid,
        epochs=epochs,
        dim=dim,
        bucket=bucket,
    )
class TestClass(object):
matches_file_path = 'tests/tagger/matches_file.tsv'
matches_file_same_type_path = 'tests/tagger/matches_file_same_type.tsv'
matches_document_level_comentions_file_path = 'tests/tagger/matches_file_document_level_comentions.tsv'
matches_file_single_matches_path = 'tests/tagger/matches_file_single_matches.tsv'
matches_file_cross_path = 'tests/tagger/matches_file_cross.tsv'
matches_file_cross_fantasy_types_path = 'tests/tagger/matches_file_cross_fantasy_types.tsv'
sentence_score_file_path = 'tests/tagger/sentence_scores_file.tsv'
paragraph_score_file_path = 'tests/tagger/paragraph_scores_file.tsv'
document_score_file_path = 'tests/tagger/document_scores_file.tsv'
paragraph_sentence_score_file_path = 'tests/tagger/paragraph_sentence_scores_file.tsv'
document_paragraph_sentence_score_file_path = 'tests/tagger/document_paragraph_sentence_scores_file.tsv'
document_paragraph_score_file_path = 'tests/tagger/document_paragraph_scores_file.tsv'
precedence_document_paragraph_sentence_score_file_path = \
'tests/tagger/precedence_document_paragraph_sentence_scores_file.tsv'
entity_file_path = 'tests/tagger/entities2.tsv.gz'
entity_fantasy_types_file_path = 'tests/tagger/entities2_fantasy_types.tsv.gz'
entity_file_same_type_path = 'tests/tagger/entities2_same_type.tsv.gz'
cos_cv_test_path = 'tests/ml/cos_simple_cv.txt'
def test_load_sentence_scores(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
assert {('--D', 'A'): {(1111, 1, 2): 0.9, (1111, 2, 3): 0.5,
(3333, 2, 2): 0.4, (3333, 2, 3): 0.44},
('B', 'C'): {(2222, 1, 1): 0}} == sentence_scores
def test_load_sentence_scores_score_cutoff(self):
sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path,
cutoff=0.5)
assert {('--D', 'A'): {(1111, 1, 2): 0.9, (1111, 2, 3): 0.5}} == sentence_scores
def test_load_paragraph_scores(self):
paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path)
assert {('--D', 'A'): {(1111, 1): 0.9, (1111, 2): 0.5,
(3333, 2): 0.4},
('B', 'C'): {(2222, 1): 0}} == paragraph_scores
def test_load_document_scores(self):
document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
assert {('--D', 'A'): {1111: 1,
3333: 2},
('B', 'C'): {2222: 3}} == document_scores
    def test_weighted_counts_sentences(self):
        """Sentence scores only: each scored co-mention contributes
        document_weight + sentence_weight * score (e.g. 15.9 = 15 + 0.9)."""
        sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=15.0, paragraph_weight=0,
                                                                  sentence_weight=1.0)
        # Expected: pair counts, per-entity marginals, and grand total (key None).
        assert {('--D', 'A'): 15.9 + 15.44,
                ('B', 'C'): 15,
                'A': 15.9 + 15.44,
                '--D': 15.9 + 15.44,
                'B': 15,
                'C': 15,
                None: 15.9 + 15.44 + 15} == approx(weighted_counts)
    def test_weighted_counts_sentences_paragraphs(self):
        """Sentence and paragraph scores both contribute to the weighted counts."""
        scores = co_occurrence_score.load_score_file(self.paragraph_sentence_score_file_path)
        sentence_scores, paragraph_scores, _ = co_occurrence_score.split_scores(scores)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, None, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=15.0, paragraph_weight=1.0,
                                                                  sentence_weight=1.0)
        # Each co-mention adds its sentence term (15.x) plus its paragraph score.
        assert {('--D', 'A'): 15.9 + 0.9 + 15.44 + 0.4,
                ('B', 'C'): 15,
                'A': 15.9 + 0.9 + 15.44 + 0.4,
                '--D': 15.9 + 0.9 + 15.44 + 0.4,
                'B': 15,
                'C': 15,
                None: 15.9 + 0.9 + 15.44 + 0.4 + 15} == approx(weighted_counts)
    def test_weighted_counts_paragraphs(self):
        """Paragraph scores only: each co-mention adds document_weight plus
        paragraph_weight * score."""
        paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, None, paragraph_scores, None, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=15.0, paragraph_weight=1.0,
                                                                  sentence_weight=1.0)
        assert {('--D', 'A'): 15.0 + 0.9 + 15.0 + 0.4,
                ('B', 'C'): 15.0,
                'A': 15.0 + 0.9 + 15.0 + 0.4,
                '--D': 15.0 + 0.9 + 15.0 + 0.4,
                'B': 15.0,
                'C': 15.0,
                None: 15.0 + 0.9 + 15.0 + 0.4 + 15.0} == approx(weighted_counts)
    def test_weighted_counts_sentences_paragraphs_documents(self):
        """All three levels contribute: sentence + paragraph scores plus
        document score scaled by document_weight."""
        scores = co_occurrence_score.load_score_file(self.document_paragraph_sentence_score_file_path)
        sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(scores)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores,
                                                                  document_scores, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=2.0, paragraph_weight=1.0,
                                                                  sentence_weight=1.0)
        assert {('--D', 'A'): 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
                ('B', 'C'): 3 * 2,
                'A': 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
                '--D': 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2,
                'B': 3 * 2,
                'C': 3 * 2,
                None: 0.9 + 0.9 + 1 * 2 + 0.44 + 0.4 + 2 * 2 + 3 * 2} == weighted_counts
    def test_weighted_counts_documents(self):
        """Document scores only: counts are document score * document_weight;
        the sentence weight is irrelevant without sentence scores."""
        document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, None, None,
                                                                  document_scores, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=2.0, paragraph_weight=1.0,
                                                                  sentence_weight=2.0)
        assert {('--D', 'A'): 1 * 2 + 2 * 2,
                ('B', 'C'): 3 * 2,
                'A': 1 * 2 + 2 * 2,
                '--D': 1 * 2 + 2 * 2,
                'B': 3 * 2,
                'C': 3 * 2,
                None: 1 * 2 + 2 * 2 + 3 * 2} == weighted_counts
    def test_weighted_counts_paragraphs_documents(self):
        """Paragraph and document scores combine: paragraph score plus
        document score * document_weight per pair."""
        paragraph_scores = co_occurrence_score.load_score_file(self.paragraph_score_file_path, )
        document_scores = co_occurrence_score.load_score_file(self.document_score_file_path)
        weighted_counts = co_occurrence_score.get_weighted_counts(None, None, paragraph_scores,
                                                                  document_scores, None,
                                                                  first_type=9606, second_type=-26,
                                                                  document_weight=2.0, paragraph_weight=1.0,
                                                                  sentence_weight=1.0)
        assert {('--D', 'A'): 0.9 + 1 * 2. + 0.4 + 2 * 2.,
                ('B', 'C'): 3 * 2.,
                'A': 0.9 + 1 * 2. + 0.4 + 2 * 2.,
                '--D': 0.9 + 1 * 2. + 0.4 + 2 * 2.,
                'B': 3 * 2.,
                'C': 3 * 2.,
                None: 0.9 + 1 * 2. + 0.4 + 2 * 2. + 3 * 2.} == approx(weighted_counts)
    def test_co_occurrence_score_sentences(self):
        """co_occurrence_score equals the score recomputed from weighted counts:
        s = c_pair**w * (c_pair * c_total / (c_e1 * c_e2))**(1 - w)."""
        sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
        document_weight = 15.0
        paragraph_weight = 0
        weighting_exponent = 0.6
        counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         sentence_weight=1.0)
        scores = co_occurrence_score.co_occurrence_score(None, self.sentence_score_file_path, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         weighting_exponent=weighting_exponent)
        # Recompute the expected score for pair ('--D', 'A') by hand.
        c_a_d = counts[('--D', 'A')]
        c_a = counts['A']
        c_d = counts['--D']
        c_all = counts[None]
        s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
        # Same for pair ('B', 'C').
        c_b_c = counts[('B', 'C')]
        c_b = counts['B']
        c_c = counts['C']
        s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
        assert s_a_d == approx(scores[('--D', 'A')])
        assert s_b_c == approx(scores[('B', 'C')])
    def test_co_occurrence_score_sentences_paragraphs(self):
        """Same consistency check as test_co_occurrence_score_sentences but
        with combined sentence + paragraph score input."""
        scores = co_occurrence_score.load_score_file(self.paragraph_sentence_score_file_path)
        sentence_scores, paragraph_scores, _ = co_occurrence_score.split_scores(scores)
        document_weight = 15.0
        paragraph_weight = 1.0
        weighting_exponent = 0.6
        counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, None, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         sentence_weight=1.0)
        scores = co_occurrence_score.co_occurrence_score(None, self.paragraph_sentence_score_file_path, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         weighting_exponent=weighting_exponent)
        # Expected scores recomputed from the counts with the same formula.
        c_a_d = counts[('--D', 'A')]
        c_a = counts['A']
        c_d = counts['--D']
        c_all = counts[None]
        s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
        c_b_c = counts[('B', 'C')]
        c_b = counts['B']
        c_c = counts['C']
        s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
        assert s_a_d == approx(scores[('--D', 'A')])
        assert s_b_c == approx(scores[('B', 'C')])
    def test_co_occurrence_score_sentences_documents(self):
        """Same consistency check with sentence, paragraph and document scores."""
        scores = co_occurrence_score.load_score_file(self.document_paragraph_sentence_score_file_path)
        sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(scores)
        document_weight = 15.0
        paragraph_weight = 1.0
        weighting_exponent = 0.6
        counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores, document_scores, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         sentence_weight=1.0)
        scores = co_occurrence_score.co_occurrence_score(None, self.document_paragraph_sentence_score_file_path, None,
                                                         first_type=9606, second_type=-26,
                                                         document_weight=document_weight,
                                                         paragraph_weight=paragraph_weight,
                                                         weighting_exponent=weighting_exponent)
        # Expected scores recomputed from the counts with the same formula.
        c_a_d = counts[('--D', 'A')]
        c_a = counts['A']
        c_d = counts['--D']
        c_all = counts[None]
        s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
        c_b_c = counts[('B', 'C')]
        c_b = counts['B']
        c_c = counts['C']
        s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
        assert s_a_d == approx(scores[('--D', 'A')])
        assert s_b_c == approx(scores[('B', 'C')])
def test_co_occurrence_score_precedence_sentences_paragraphs_documents(self):
    """With ignore_scores=True, the single co-mention contributes the plain sum of the level weights."""
    document_weight = 2.0
    paragraph_weight = 1.0
    sentence_weight = 1.0
    loaded = co_occurrence_score.load_score_file(self.precedence_document_paragraph_sentence_score_file_path)
    sentence_scores, paragraph_scores, document_scores = co_occurrence_score.split_scores(loaded)
    weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, paragraph_scores,
                                                              document_scores, None,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=document_weight,
                                                              paragraph_weight=paragraph_weight,
                                                              sentence_weight=sentence_weight,
                                                              ignore_scores=True)
    weight_sum = document_weight + paragraph_weight + sentence_weight
    expected = {('B', 'C'): weight_sum, 'B': weight_sum, 'C': weight_sum, None: weight_sum}
    assert expected == weighted_counts
def test_weighted_counts_sentences_only_diseases(self):
    """Weighted counts with ignore_scores=True must match the hand-computed totals."""
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    weighted_counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=15.0, paragraph_weight=0,
                                                              sentence_weight=1.0,
                                                              ignore_scores=True)
    expected = {('--D', 'A'): 32, ('B', 'C'): 16,
                'A': 32, '--D': 32,
                'B': 16, 'C': 16,
                None: 48}
    assert expected == weighted_counts
def test_co_occurrence_score_sentences_only_diseases(self):
    """co_occurrence_score() with ignore_scores must match scores derived from ignored-score counts."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(None, sentence_scores, None, None, None,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0,
                                                     ignore_scores=True)
    scores = co_occurrence_score.co_occurrence_score(None, self.sentence_score_file_path, None,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent,
                                                     ignore_scores=True)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_weighted_counts_matches_file(self):
    """Weighted counts computed from the matches file must match the hand-computed totals."""
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
                                                              self.entity_file_path,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=15.0, paragraph_weight=0,
                                                              sentence_weight=1.0)
    # the grand total accumulates float error, so check it via approx and remove it first
    assert 15.9 + 15.44 + 15. == approx(weighted_counts.pop(None))
    expected = {('--D', 'A'): 15.9 + 15.44, ('B', 'C'): 15.,
                'A': 15.9 + 15.44, '--D': 15.9 + 15.44,
                'B': 15., 'C': 15.}
    assert expected == weighted_counts
def test_co_occurrence_score_matches_file(self):
    """co_occurrence_score() on the matches file must match scores derived from weighted counts."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_path, self.sentence_score_file_path,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_file_same_type(self):
    """co_occurrence_score() must behave consistently when both entity types are identical."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_same_type_path, sentence_scores, None, None,
                                                     self.entity_file_same_type_path,
                                                     first_type=2, second_type=2,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_same_type_path,
                                                     self.sentence_score_file_path,
                                                     self.entity_file_same_type_path,
                                                     first_type=2, second_type=2,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_file_diseases(self):
    """co_occurrence_score_diseases() must match scores derived from ignored-score weighted counts."""
    document_weight = 15.0
    paragraph_weight = 0
    sentence_weight = 1.0
    exponent = 0.6
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_path, sentence_scores, None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0,
                                                     ignore_scores=True)
    scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_path,
                                                              self.entity_file_path,
                                                              document_weight=document_weight,
                                                              sentence_weight=sentence_weight)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_weighted_counts_matches_document_level_comentions_file(self):
    """Weighted counts for document-level co-mentions must match the hand-computed totals."""
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
                                                              sentence_scores, None, None,
                                                              self.entity_file_path,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=15.0, paragraph_weight=0,
                                                              sentence_weight=1.0)
    expected = {('--D', 'A'): 15. + 15.44, ('B', 'C'): 15.,
                'A': 15. + 15.44, '--D': 15. + 15.44,
                'B': 15., 'C': 15.,
                None: 15. + 15.44 + 15.}
    assert expected == weighted_counts
def test_co_occurrence_score_matches_document_level_comentions_file(self):
    """co_occurrence_score() on document-level co-mentions must match counts-derived scores."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
                                                     sentence_scores, None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_document_level_comentions_file_path,
                                                     self.sentence_score_file_path,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_document_level_comentions_file_diseases(self):
    """co_occurrence_score_diseases() on document-level co-mentions must match counts-derived scores."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    sentence_weight = 1.0
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    counts = co_occurrence_score.get_weighted_counts(self.matches_document_level_comentions_file_path,
                                                     sentence_scores, None, None, self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=sentence_weight,
                                                     ignore_scores=True)
    scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_document_level_comentions_file_path,
                                                              self.entity_file_path,
                                                              document_weight=document_weight,
                                                              sentence_weight=sentence_weight)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_weighted_counts_matches_single_matches_file(self):
    """Weighted counts from the single-matches file must equal those of the regular matches file."""
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
                                                              sentence_scores, None, None,
                                                              self.entity_file_path,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=15.0, paragraph_weight=0,
                                                              sentence_weight=1.0)
    # the grand total accumulates float error, so check it via approx and remove it first
    assert 15.9 + 15.44 + 15. == approx(weighted_counts.pop(None))
    expected = {('--D', 'A'): 15.9 + 15.44, ('B', 'C'): 15.,
                'A': 15.9 + 15.44, '--D': 15.9 + 15.44,
                'B': 15., 'C': 15.}
    assert expected == weighted_counts
def test_co_occurrence_score_matches_single_matches_file(self):
    """co_occurrence_score() on the single-matches file must match counts-derived scores."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
                                                     sentence_scores, None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_single_matches_path,
                                                     self.sentence_score_file_path,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
def test_co_occurrence_score_matches_single_matches_file_diseases(self):
    """co_occurrence_score_diseases() on the single-matches file must match counts-derived scores.

    Bug fix: the diseases scores were previously computed from ``matches_file_path``
    while the weighted counts used ``matches_file_single_matches_path``, so the test
    did not exercise the file it is named after. Both calls now use the
    single-matches file. (The sibling weighted-counts tests show both files yield
    identical totals, so the expected relationship is unchanged.)
    """
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    document_weight = 15.0
    paragraph_weight = 0
    weighting_exponent = 0.6
    sentence_weight = 1.0
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_single_matches_path,
                                                     sentence_scores, None, None, self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=sentence_weight,
                                                     ignore_scores=True)
    # score the same single-matches file the counts above are based on
    scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_single_matches_path,
                                                              self.entity_file_path,
                                                              document_weight=document_weight,
                                                              sentence_weight=sentence_weight)
    # re-derive each expected score from the weighted counts
    c_a_d = counts[('--D', 'A')]
    c_a = counts['A']
    c_d = counts['--D']
    c_all = counts[None]
    s_a_d = c_a_d ** weighting_exponent * ((c_a_d * c_all) / (c_a * c_d)) ** (1 - weighting_exponent)
    c_b_c = counts[('B', 'C')]
    c_b = counts['B']
    c_c = counts['C']
    s_b_c = c_b_c ** weighting_exponent * ((c_b_c * c_all) / (c_b * c_c)) ** (1 - weighting_exponent)
    assert s_a_d == approx(scores[('--D', 'A')])
    assert s_b_c == approx(scores[('B', 'C')])
def test_weighted_counts_matches_file_cross(self):
    """Weighted counts from the cross-matches file must match the hand-computed totals."""
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    weighted_counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
                                                              None, None,
                                                              self.entity_file_path,
                                                              first_type=9606, second_type=-26,
                                                              document_weight=15.0, paragraph_weight=0,
                                                              sentence_weight=1.0)
    # totals that accumulate float error are checked via approx and removed first
    assert 15.9 + 15.44 + 15. + 15. == approx(weighted_counts.pop(None))
    assert 15.9 + 15.44 + 15. == approx(weighted_counts.pop('--D'))
    expected = {('--D', 'A'): 15.9 + 15.44, ('--D', 'B'): 15., ('B', 'C'): 15.,
                'A': 15.9 + 15.44,
                'B': 15. + 15., 'C': 15.}
    assert expected == weighted_counts
def test_co_occurrence_score_matches_file_cross(self):
    """co_occurrence_score() on cross matches must match counts-derived scores for all three pairs."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores, None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_path, self.sentence_score_file_path,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
    assert expected_score('--D', 'B') == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_swap_types(self):
    """Swapping first_type and second_type must not change the counts-vs-scores relationship."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
                                                     None, None,
                                                     self.entity_file_path,
                                                     first_type=-26, second_type=9606,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_path, self.sentence_score_file_path,
                                                     self.entity_file_path,
                                                     first_type=-26, second_type=9606,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
    assert expected_score('--D', 'B') == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_fantasy_types(self):
    """Arbitrary (fantasy) entity type IDs must not change the counts-vs-scores relationship."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    loaded = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    sentence_scores, _, _ = co_occurrence_score.split_scores(loaded)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_fantasy_types_path, sentence_scores,
                                                     None, None,
                                                     self.entity_fantasy_types_file_path,
                                                     first_type=1, second_type=2,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=1.0)
    scores = co_occurrence_score.co_occurrence_score(self.matches_file_cross_fantasy_types_path,
                                                     self.sentence_score_file_path,
                                                     self.entity_fantasy_types_file_path,
                                                     first_type=1, second_type=2,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     weighting_exponent=exponent)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
    assert expected_score('--D', 'B') == approx(scores[('--D', 'B')])
def test_co_occurrence_score_matches_file_cross_diseases(self):
    """co_occurrence_score_diseases() on cross matches must match counts-derived scores for all pairs."""
    document_weight = 15.0
    paragraph_weight = 0
    exponent = 0.6
    sentence_weight = 1.0
    sentence_scores = co_occurrence_score.load_score_file(self.sentence_score_file_path)
    counts = co_occurrence_score.get_weighted_counts(self.matches_file_cross_path, sentence_scores,
                                                     None, None,
                                                     self.entity_file_path,
                                                     first_type=9606, second_type=-26,
                                                     document_weight=document_weight,
                                                     paragraph_weight=paragraph_weight,
                                                     sentence_weight=sentence_weight,
                                                     ignore_scores=True)
    scores = co_occurrence_score.co_occurrence_score_diseases(self.matches_file_cross_path,
                                                              self.entity_file_path,
                                                              document_weight=document_weight,
                                                              sentence_weight=sentence_weight)

    def expected_score(first, second):
        # same scoring formula the implementation applies to the weighted counts
        c_pair = counts[(first, second)]
        c_total = counts[None]
        return (c_pair ** exponent
                * ((c_pair * c_total) / (counts[first] * counts[second])) ** (1 - exponent))

    assert expected_score('--D', 'A') == approx(scores[('--D', 'A')])
    assert expected_score('B', 'C') == approx(scores[('B', 'C')])
    assert expected_score('--D', 'B') == approx(scores[('--D', 'B')])
def test_cocoscore_cv_independent_associations(self):
    """Two-fold CV on the toy data set must produce perfect train/test scores in every split."""
    cv_folds = 2
    score_params = {'sentence_weight': 1,
                    'paragraph_weight': 1,
                    'document_weight': 1,
                    }
    test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
    cv_results = co_occurrence_score.cv_independent_associations(test_df,
                                                                 score_params,
                                                                 cv_folds=cv_folds,
                                                                 random_state=numpy.random.RandomState(3),
                                                                 fasttext_epochs=5,
                                                                 fasttext_bucket=1000,
                                                                 fasttext_dim=20)
    expected_col_names = [
        'mean_test_score',
        'stdev_test_score',
        'mean_train_score',
        'stdev_train_score',
        'split_0_test_score',
        'split_0_train_score',
        'split_0_n_test',
        'split_0_pos_test',
        'split_0_n_train',
        'split_0_pos_train',
        'split_1_test_score',
        'split_1_train_score',
        'split_1_n_test',
        'split_1_pos_test',
        'split_1_n_train',
        'split_1_pos_train',
    ]
    cv_runs = 1
    expected_values = [
        [1.0] * cv_runs,
        [0.0] * cv_runs,
        [1.0] * cv_runs,
        [0.0] * cv_runs,
        [1.0] * cv_runs,
        [1.0] * cv_runs,
        [24] * cv_runs,
        [0.5] * cv_runs,
        [24] * cv_runs,
        [0.5] * cv_runs,
        [1.0] * cv_runs,
        [1.0] * cv_runs,
        [24] * cv_runs,
        [0.5] * cv_runs,
        [24] * cv_runs,
        [0.5] * cv_runs,
    ]
    # build the expected frame with columns in the documented order
    expected_df = pandas.DataFrame(dict(zip(expected_col_names, expected_values)),
                                   columns=expected_col_names)
    assert_frame_equal(cv_results, expected_df)
def test_cocoscore_cv_independent_associations_bad_param(self):
    """A misspelled score-function parameter must surface as a TypeError."""
    test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
    bad_params = {'sentence_weightXXXX': 1,
                  'paragraph_weight': 1,
                  'document_weight': 1,
                  }
    with raises(TypeError, match="got an unexpected keyword argument"):
        co_occurrence_score.cv_independent_associations(test_df, bad_params,
                                                        cv_folds=2,
                                                        random_state=numpy.random.RandomState(3),
                                                        fasttext_epochs=5,
                                                        fasttext_bucket=1000,
                                                        fasttext_dim=20,
                                                        constant_scoring='document')
def test_cocoscore_cv_independent_associations_bad_constant_scoring(self):
    """An unknown constant_scoring value must surface as a ValueError naming the bad value."""
    test_df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    test_df['text'] = test_df['text'].apply(lambda s: s.strip().lower())
    params = {'sentence_weight': 1,
              'paragraph_weight': 1,
              'document_weight': 1,
              }
    with raises(ValueError, match='Unknown constant_scoring parameter: documenti'):
        co_occurrence_score.cv_independent_associations(test_df, params,
                                                        cv_folds=2,
                                                        random_state=numpy.random.RandomState(3),
                                                        fasttext_epochs=5,
                                                        fasttext_bucket=1000,
                                                        fasttext_dim=20,
                                                        constant_scoring='documenti')
def test_cocoscore_constant_sentence_scoring(self):
    """constant_scoring='sentence' must pin sentence-level matches to 1 and all other matches to -1."""
    df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    df['text'] = df['text'].apply(lambda s: s.strip().lower())

    def nmdf(data_frame):
        return polynomial_decay_distance(data_frame, 0, -2, 1)

    train_scores, test_scores = co_occurrence_score._get_train_test_scores(df.copy(), df.copy(),
                                                                           fasttext_function,
                                                                           fasttext_epochs=5, fasttext_dim=20,
                                                                           fasttext_bucket=1000,
                                                                           match_distance_function=nmdf,
                                                                           constant_scoring='sentence')
    is_sentence_match = numpy.logical_and(df['sentence'] != -1, df['paragraph'] != -1)
    for scores in (train_scores, test_scores):
        assert (scores[is_sentence_match] == 1).all()
        assert (scores[numpy.logical_not(is_sentence_match)] == -1).all()
def test_cocoscore_constant_paragraph_scoring(self):
    """constant_scoring='paragraph' must pin paragraph-level matches to 1 and document-level ones to -1."""
    df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    df['text'] = df['text'].apply(lambda s: s.strip().lower())

    def nmdf(data_frame):
        return polynomial_decay_distance(data_frame, 0, -2, 1)

    train_scores, test_scores = co_occurrence_score._get_train_test_scores(df.copy(), df.copy(),
                                                                           fasttext_function,
                                                                           fasttext_epochs=5, fasttext_dim=20,
                                                                           fasttext_bucket=1000,
                                                                           match_distance_function=nmdf,
                                                                           constant_scoring='paragraph')
    is_paragraph_match = numpy.logical_and(df['sentence'] == -1, df['paragraph'] != -1)
    is_document_match = numpy.logical_and(df['sentence'] == -1, df['paragraph'] == -1)
    for scores in (train_scores, test_scores):
        assert (scores[is_paragraph_match] == 1).all()
        assert (scores[is_document_match] == -1).all()
def test_cocoscore_constant_document_scoring(self):
    """constant_scoring='document' must pin document-level matches to 1 and paragraph-level ones to -1."""
    df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    df['text'] = df['text'].apply(lambda s: s.strip().lower())

    def nmdf(data_frame):
        return polynomial_decay_distance(data_frame, 0, -2, 1)

    train_scores, test_scores = co_occurrence_score._get_train_test_scores(df.copy(), df.copy(),
                                                                           fasttext_function,
                                                                           fasttext_epochs=5, fasttext_dim=20,
                                                                           fasttext_bucket=1000,
                                                                           match_distance_function=nmdf,
                                                                           constant_scoring='document')
    is_paragraph_match = numpy.logical_and(df['sentence'] == -1, df['paragraph'] != -1)
    is_document_match = numpy.logical_and(df['sentence'] == -1, df['paragraph'] == -1)
    for scores in (train_scores, test_scores):
        assert (scores[is_paragraph_match] == -1).all()
        assert (scores[is_document_match] == 1).all()
def test_fit_score_default(self):
    """fit_score_default() must assign a positive association score to every known pair."""
    df = dt.load_data_frame(self.cos_cv_test_path, match_distance=True)
    train_scores, test_scores = co_occurrence_score.fit_score_default(df.copy(), df.copy(),
                                                                      fasttext_epochs=5,
                                                                      fasttext_dim=20,
                                                                      fasttext_bucket=1000)
    for pair in (('A', 'B'), ('C', 'D'), ('E', 'F'), ('G', 'H')):
        assert train_scores[pair] > 0
        assert test_scores[pair] > 0
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.